NetApp ONTAP: Add storage assisted migration support

This patch adds support for storage assisted migration
(intra-cluster) to NetApp ONTAP drivers (iSCSI/FC/NFS), for the
following use cases:

1) Between pools in the same vserver and backend/stanza. This
operation is non-disruptive for the iSCSI and FC drivers.
2) Between pools in the same vserver but in a different
backend/stanza. This operation is disruptive in all cases
and requires the volume to be in `available` status.
3) Between pools in a different vserver and a different
backend/stanza. This operation is disruptive in all cases
and requires the volume to be in `available` status.

Storage assisted migration is only supported within the same
ONTAP cluster. If a migration between two different clusters is
requested, the driver automatically falls back to host assisted
migration.
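
In outline, the assisted path parses the destination host string
Cinder passes in ('host@backend#pool'), then compares cluster
names, vservers and pools to pick one of the cases above. A
condensed, hypothetical sketch of that decision (illustrative
names, not the driver's literal code):

def pick_migration_path(src_cluster, dest_cluster,
                        src_vserver, dest_vserver,
                        src_pool, dest_pool):
    # Inter-cluster destinations are out of scope for the assisted path.
    if src_cluster != dest_cluster:
        return 'host-assisted-fallback'
    # Case 3: different vserver, always a disruptive copy.
    if src_vserver != dest_vserver:
        return 'migrate-to-vserver'
    # Cases 1 and 2: same vserver, move/copy between pools.
    if src_pool != dest_pool:
        return 'migrate-to-pool'
    # Source and destination pools match: nothing to do.
    return 'noop'

# Same cluster and vserver, different pool -> case 1 or 2.
assert pick_migration_path('c1', 'c1', 'vs1', 'vs1',
                           'aggr1', 'aggr2') == 'migrate-to-pool'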

Implements: blueprint ontap-storage-assisted-migration
Change-Id: Iaad87c80ae37b6c0fc5f788dc56f1f72c0ca07fa
Fernando Ferraz 2021-07-06 12:27:07 +00:00
parent 086f619cee
commit 2a017f5b3b
20 changed files with 2144 additions and 15 deletions

View File

@@ -366,6 +366,7 @@ def list_opts():
cinder_volume_drivers_netapp_options.netapp_san_opts,
cinder_volume_drivers_netapp_options.netapp_replication_opts,
cinder_volume_drivers_netapp_options.netapp_support_opts,
cinder_volume_drivers_netapp_options.netapp_migration_opts,
cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS,
cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS,
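
The netapp_migration_opts group registered above carries the
netapp_migrate_volume_timeout option referenced by the tests below.
A minimal oslo.config sketch of what that registration plausibly
looks like (option name taken from the tests; default and bounds are
assumptions):

from oslo_config import cfg

# Assumed shape of the new option group; the real definition lives in
# cinder/volume/drivers/netapp/options.py.
netapp_migration_opts = [
    cfg.IntOpt('netapp_migrate_volume_timeout',
               default=3600,
               min=30,
               help='Time in seconds to wait for storage assisted volume '
                    'migration to complete.'),
]

cfg.CONF.register_opts(netapp_migration_opts)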

View File

@@ -71,7 +71,11 @@ FAKE_NA_SERVER_API_1_20.set_api_version(1, 20)
VOLUME_VSERVER_NAME = 'fake_vserver'
VOLUME_NAMES = ('volume1', 'volume2')
VOLUME_NAME = 'volume1'
DEST_VOLUME_NAME = 'volume-dest'
LUN_NAME = 'fake-lun-name'
DEST_LUN_NAME = 'new-fake-lun-name'
FILE_NAME = 'fake-file-name'
DEST_FILE_NAME = 'new-fake-file-name'
FAKE_QUERY = {'volume-attributes': None}
@@ -1337,6 +1341,7 @@ REMOTE_CLUSTER_NAME = 'fake_cluster_2'
CLUSTER_ADDRESS_1 = 'fake_cluster_address'
CLUSTER_ADDRESS_2 = 'fake_cluster_address_2'
VSERVER_NAME = 'fake_vserver'
DEST_VSERVER_NAME = 'fake_dest_vserver'
VSERVER_NAME_2 = 'fake_vserver_2'
ADMIN_VSERVER_NAME = 'fake_admin_vserver'
NODE_VSERVER_NAME = 'fake_node_vserver'
@@ -1481,3 +1486,70 @@ VSERVER_DATA_LIST_RESPONSE = etree.XML("""
<num-records>1</num-records>
</results>
""" % {'vserver': VSERVER_NAME})
GET_CLUSTER_NAME_RESPONSE = etree.XML("""
<results status="passed">
<attributes>
<cluster-identity-info>
<cluster-name>%(cluster)s</cluster-name>
</cluster-identity-info>
</attributes>
</results>
""" % {'cluster': CLUSTER_NAME})
START_LUN_MOVE_RESPONSE = etree.XML("""
<results status="passed">
<job-uuid>%(job_uuid)s</job-uuid>
</results>
""" % {'job_uuid': fake.JOB_UUID})
GET_LUN_MOVE_STATUS_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<lun-move-info>
<job-status>complete</job-status>
</lun-move-info>
</attributes-list>
</results>
""")
START_LUN_COPY_RESPONSE = etree.XML("""
<results status="passed">
<job-uuid>%(job_uuid)s</job-uuid>
</results>
""" % {'job_uuid': fake.JOB_UUID})
GET_LUN_COPY_STATUS_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<lun-copy-info>
<job-status>complete</job-status>
</lun-copy-info>
</attributes-list>
</results>
""")
CANCEL_LUN_COPY_RESPONSE = etree.XML("""
<results status="passed" />
""")
START_FILE_COPY_RESPONSE = etree.XML("""
<results status="passed">
<job-uuid>%(job_uuid)s</job-uuid>
</results>
""" % {'job_uuid': fake.JOB_UUID})
GET_FILE_COPY_STATUS_RESPONSE = etree.XML("""
<results status="passed">
<attributes-list>
<file-copy-info>
<scanner-status>complete</scanner-status>
</file-copy-info>
</attributes-list>
<num-records>1</num-records>
</results>
""")
DESTROY_FILE_COPY_RESPONSE = etree.XML("""
<results status="passed" />
""")

View File

@@ -3350,7 +3350,8 @@ class NetAppCmodeClientTestCase(test.TestCase):
],
}
self.client.connection.send_request.assert_has_calls([
mock.call('vserver-peer-create', vserver_peer_create_args)])
mock.call('vserver-peer-create', vserver_peer_create_args,
enable_tunneling=False)])
def test_delete_vserver_peer(self):
@@ -3396,16 +3397,18 @@
'vserver': fake_client.VSERVER_NAME,
'peer-vserver': fake_client.VSERVER_NAME_2,
}
},
}
}
self.client.send_iter_request.assert_has_calls([
mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args)])
mock.call('vserver-peer-get-iter', vserver_peer_get_iter_args,
enable_tunneling=False)])
expected = [{
'vserver': 'fake_vserver',
'peer-vserver': 'fake_vserver_2',
'peer-state': 'peered',
'peer-cluster': 'fake_cluster'
'peer-cluster': 'fake_cluster',
'applications': ['snapmirror'],
}]
self.assertEqual(expected, result)
@@ -4094,3 +4097,252 @@ class NetAppCmodeClientTestCase(test.TestCase):
self.assertRaises(exception.VolumeBackendAPIException,
self.client.get_unique_volume,
api_response)
def test_get_cluster_name(self):
api_response = netapp_api.NaElement(
fake_client.GET_CLUSTER_NAME_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
api_args = {
'desired-attributes': {
'cluster-identity-info': {
'cluster-name': None,
}
}
}
result = self.client.get_cluster_name()
mock_send_request.assert_called_once_with('cluster-identity-get',
api_args,
enable_tunneling=False)
self.assertEqual(fake_client.CLUSTER_NAME, result)
@ddt.data((fake_client.LUN_NAME, fake_client.DEST_VOLUME_NAME, None,
fake_client.VOLUME_NAME),
(fake_client.LUN_NAME, None, fake_client.DEST_LUN_NAME,
fake_client.DEST_VOLUME_NAME))
@ddt.unpack
def test_start_lun_move(self, src_lun_name, src_ontap_vol, dest_lun_name,
dest_ontap_vol):
api_response = netapp_api.NaElement(
fake_client.START_LUN_MOVE_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.start_lun_move(src_lun_name,
dest_ontap_vol,
src_ontap_volume=src_ontap_vol,
dest_lun_name=dest_lun_name)
api_args = {
'paths': [{
'lun-path-pair': {
'destination-path': '/vol/%s/%s' % (dest_ontap_vol,
src_lun_name if
dest_lun_name is None
else dest_lun_name),
'source-path': '/vol/%s/%s' % (dest_ontap_vol
if src_ontap_vol is None
else src_ontap_vol,
src_lun_name)
}
}]
}
mock_send_request.assert_called_once_with('lun-move-start', api_args)
self.assertEqual(fake.JOB_UUID, result)
def test_get_lun_move_status(self):
api_response = netapp_api.NaElement(
fake_client.GET_LUN_MOVE_STATUS_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.get_lun_move_status(fake.JOB_UUID)
api_args = {
'query': {
'lun-move-info': {
'job-uuid': fake.JOB_UUID
}
}
}
mock_send_request.assert_called_once_with('lun-move-get-iter',
api_args)
expected = {
'job-status': 'complete',
'last-failure-reason': None
}
self.assertEqual(expected, result)
@ddt.data((fake_client.LUN_NAME, None, fake_client.VSERVER_NAME,
fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME,
fake_client.DEST_VSERVER_NAME),
(fake_client.LUN_NAME, fake_client.VOLUME_NAME, None,
fake_client.DEST_LUN_NAME, fake_client.DEST_VOLUME_NAME,
fake_client.DEST_VSERVER_NAME),
(fake_client.LUN_NAME, fake_client.VOLUME_NAME,
fake_client.VSERVER_NAME, None, fake_client.DEST_VOLUME_NAME,
fake_client.DEST_VSERVER_NAME))
@ddt.unpack
def test_start_lun_copy(self, src_lun_name, src_ontap_vol, src_vserver,
dest_lun_name, dest_ontap_vol, dest_vserver):
api_response = netapp_api.NaElement(
fake_client.START_LUN_COPY_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.start_lun_copy(src_lun_name,
dest_ontap_vol,
dest_vserver,
src_ontap_volume=src_ontap_vol,
src_vserver=src_vserver,
dest_lun_name=dest_lun_name)
api_args = {
'source-vserver': (dest_vserver if not src_vserver
else src_vserver),
'destination-vserver': dest_vserver,
'paths': [{
'lun-path-pair': {
'destination-path': '/vol/%s/%s' % (dest_ontap_vol,
src_lun_name if
dest_lun_name is None
else dest_lun_name),
'source-path': '/vol/%s/%s' % (dest_ontap_vol
if src_ontap_vol is None
else src_ontap_vol,
src_lun_name)
}
}]
}
mock_send_request.assert_called_once_with('lun-copy-start', api_args,
enable_tunneling=False)
self.assertEqual(fake.JOB_UUID, result)
def test_get_lun_copy_status(self):
api_response = netapp_api.NaElement(
fake_client.GET_LUN_COPY_STATUS_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.get_lun_copy_status(fake.JOB_UUID)
api_args = {
'query': {
'lun-copy-info': {
'job-uuid': fake.JOB_UUID
}
}
}
mock_send_request.assert_called_once_with('lun-copy-get-iter',
api_args,
enable_tunneling=False)
expected = {
'job-status': 'complete',
'last-failure-reason': None
}
self.assertEqual(expected, result)
@ddt.data((fake_client.FILE_NAME, None, fake_client.DEST_VOLUME_NAME,
fake_client.DEST_VOLUME_NAME),
(fake_client.FILE_NAME, fake_client.VOLUME_NAME, None,
fake_client.DEST_VOLUME_NAME))
@ddt.unpack
def test_start_file_copy(self, src_file_name, src_ontap_vol,
dest_file_name, dest_ontap_vol):
api_response = netapp_api.NaElement(
fake_client.START_FILE_COPY_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.start_file_copy(src_file_name,
dest_ontap_vol,
src_ontap_volume=src_ontap_vol,
dest_file_name=dest_file_name)
api_args = {
'source-paths': [{
'sfod-operation-path': '%s/%s' % (dest_ontap_vol if
src_ontap_vol is None else
src_ontap_vol,
src_file_name)
}],
'destination-paths': [{
'sfod-operation-path': '%s/%s' % (dest_ontap_vol,
src_file_name if
dest_file_name is None else
dest_file_name)
}],
}
mock_send_request.assert_called_once_with('file-copy-start', api_args,
enable_tunneling=False)
self.assertEqual(fake.JOB_UUID, result)
def test_get_file_copy_status(self):
api_response = netapp_api.NaElement(
fake_client.GET_FILE_COPY_STATUS_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.get_file_copy_status(fake.JOB_UUID)
api_args = {
'query': {
'file-copy-info': {
'job-uuid': fake.JOB_UUID
}
}
}
mock_send_request.assert_called_once_with('file-copy-get-iter',
api_args,
enable_tunneling=False)
expected = {
'job-status': 'complete',
'last-failure-reason': None
}
self.assertEqual(expected, result)
def test_destroy_file_copy(self):
api_response = netapp_api.NaElement(
fake_client.DESTROY_FILE_COPY_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.destroy_file_copy(fake.JOB_UUID)
api_args = {
'job-uuid': fake.JOB_UUID,
'file-index': 0
}
mock_send_request.assert_called_once_with('file-copy-destroy',
api_args,
enable_tunneling=False)
self.assertIsNone(result)
def test_destroy_file_copy_error(self):
mock_send_request = self.mock_object(self.client.connection,
'send_request',
side_effect=netapp_api.NaApiError)
self.assertRaises(netapp_utils.NetAppDriverException,
self.client.destroy_file_copy,
fake.JOB_UUID)
api_args = {
'job-uuid': fake.JOB_UUID,
'file-index': 0
}
mock_send_request.assert_called_once_with('file-copy-destroy',
api_args,
enable_tunneling=False)
def test_cancel_lun_copy(self):
api_response = netapp_api.NaElement(
fake_client.CANCEL_LUN_COPY_RESPONSE)
mock_send_request = self.mock_object(
self.client.connection, 'send_request', return_value=api_response)
result = self.client.cancel_lun_copy(fake.JOB_UUID)
api_args = {
'job-uuid': fake.JOB_UUID
}
mock_send_request.assert_called_once_with('lun-copy-cancel',
api_args,
enable_tunneling=False)
self.assertIsNone(result)
def test_cancel_lun_copy_error(self):
mock_send_request = self.mock_object(self.client.connection,
'send_request',
side_effect=netapp_api.NaApiError)
self.assertRaises(netapp_utils.NetAppDriverException,
self.client.cancel_lun_copy,
fake.JOB_UUID)
api_args = {
'job-uuid': fake.JOB_UUID
}
mock_send_request.assert_called_once_with('lun-copy-cancel',
api_args,
enable_tunneling=False)

View File

@@ -49,6 +49,15 @@ NFS_EXPORT_2 = 'nfs-host2:/export'
MOUNT_POINT = '/mnt/nfs'
ATTACHED = 'attached'
DETACHED = 'detached'
DEST_POOL_NAME = 'dest-aggr'
DEST_VSERVER_NAME = 'dest-vserver'
DEST_BACKEND_NAME = 'dest-backend'
DEST_HOST_STRING = '%s@%s#%s' % (HOST_NAME, DEST_BACKEND_NAME, DEST_POOL_NAME)
DEST_EXPORT_PATH = '/fake/export/dest-path'
DEST_NFS_SHARE = '%s:%s' % (SHARE_IP, DEST_EXPORT_PATH)
CLUSTER_NAME = 'fake-cluster-name'
DEST_CLUSTER_NAME = 'fake-dest-cluster-name'
JOB_UUID = 'fb132b04-6422-43ce-9451-ee819f0131a4'
LUN_METADATA = {
'OsType': None,
'SpaceReserved': 'true',

View File

@@ -1705,3 +1705,17 @@ class NetAppBlockStorageLibraryTestCase(test.TestCase):
mock_get_snapshots_marked.assert_called_once_with()
mock_delete_snapshot.assert_called_once_with(
fake.VOLUME['name'], fake.SNAPSHOT_NAME)
def test_delete_lun_from_table(self):
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.library.lun_table = {fake_lun.name: fake_lun}
self.library._delete_lun_from_table(fake_lun.name)
self.assertEqual({}, self.library.lun_table)
def test_delete_lun_from_table_not_found(self):
fake_lun = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_ID,
fake.LUN_SIZE, fake.LUN_METADATA)
self.library.lun_table = {fake_lun.name: fake_lun}
self.library._delete_lun_from_table('another-fake-lun')
self.assertEqual({fake_lun.name: fake_lun}, self.library.lun_table)

View File

@@ -22,6 +22,7 @@ import ddt
from cinder import exception
from cinder.objects import fields
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
import cinder.tests.unit.volume.drivers.netapp.dataontap.fakes as fake
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as \
@@ -966,3 +967,468 @@ class NetAppBlockStorageCmodeLibraryTestCase(test.TestCase):
self.assertIsNone(snapshots_model_update)
mock__delete_lun.assert_called_once_with(fake.VG_SNAPSHOT['name'])
def test_move_lun(self):
self.library.configuration.netapp_migrate_volume_timeout = 1
fake_job_status = {'job-status': 'complete'}
mock_start_lun_move = self.mock_object(self.zapi_client,
'start_lun_move',
return_value=fake.JOB_UUID)
mock_get_lun_move_status = self.mock_object(
self.zapi_client, 'get_lun_move_status',
return_value=fake_job_status)
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.library._move_lun(
fake_vol, fake.POOL_NAME, fake.DEST_POOL_NAME,
dest_lun_name=fake.VOLUME_NAME)
mock_start_lun_move.assert_called_with(
fake_vol.name, fake.DEST_POOL_NAME,
src_ontap_volume=fake.POOL_NAME,
dest_lun_name=fake.VOLUME_NAME)
mock_get_lun_move_status.assert_called_once_with(fake.JOB_UUID)
self.assertIsNone(result)
@ddt.data(('data', na_utils.NetAppDriverTimeout),
('destroyed', na_utils.NetAppDriverException))
@ddt.unpack
def test_move_lun_error(self, status_on_error, move_exception):
self.library.configuration.netapp_migrate_volume_timeout = 1
fake_job_status = {
'job-status': status_on_error,
'last-failure-reason': None
}
mock_start_lun_move = self.mock_object(self.zapi_client,
'start_lun_move',
return_value=fake.JOB_UUID)
mock_get_lun_move_status = self.mock_object(
self.zapi_client, 'get_lun_move_status',
return_value=fake_job_status)
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(move_exception,
self.library._move_lun,
fake_vol,
fake.POOL_NAME,
fake.DEST_POOL_NAME,
dest_lun_name=fake.VOLUME_NAME)
mock_start_lun_move.assert_called_with(
fake_vol.name, fake.DEST_POOL_NAME,
src_ontap_volume=fake.POOL_NAME,
dest_lun_name=fake.VOLUME_NAME)
mock_get_lun_move_status.assert_called_with(fake.JOB_UUID)
def test_cancel_lun_copy(self):
mock_cancel_lun_copy = self.mock_object(self.zapi_client,
'cancel_lun_copy')
mock_get_client_for_backend = self.mock_object(
dot_utils, 'get_client_for_backend', return_value=self.zapi_client)
mock_destroy_lun = self.mock_object(self.zapi_client,
'destroy_lun')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.library._cancel_lun_copy(fake.JOB_UUID,
fake_vol,
fake.DEST_POOL_NAME,
fake.DEST_BACKEND_NAME)
mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID)
mock_get_client_for_backend.assert_not_called()
mock_destroy_lun.assert_not_called()
self.assertIsNone(result)
def test_cancel_lun_copy_force_destroy_lun(self):
mock_cancel_lun_copy = self.mock_object(
self.zapi_client, 'cancel_lun_copy',
side_effect=na_utils.NetAppDriverException)
mock_get_client_for_backend = self.mock_object(
dot_utils, 'get_client_for_backend', return_value=self.zapi_client)
mock_destroy_lun = self.mock_object(self.zapi_client, 'destroy_lun')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.library._cancel_lun_copy(fake.JOB_UUID,
fake_vol,
fake.DEST_POOL_NAME,
fake.DEST_BACKEND_NAME)
mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID)
mock_get_client_for_backend.assert_called_once_with(
fake.DEST_BACKEND_NAME)
fake_lun_path = '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name)
mock_destroy_lun.assert_called_once_with(fake_lun_path)
self.assertIsNone(result)
def test_cancel_lun_copy_error_on_force_destroy_lun(self):
mock_cancel_lun_copy = self.mock_object(
self.zapi_client, 'cancel_lun_copy',
side_effect=na_utils.NetAppDriverException)
mock_get_client_for_backend = self.mock_object(
dot_utils, 'get_client_for_backend', return_value=self.zapi_client)
mock_destroy_lun = self.mock_object(
self.zapi_client, 'destroy_lun',
side_effect=na_utils.NetAppDriverException)
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.library._cancel_lun_copy(fake.JOB_UUID,
fake_vol,
fake.DEST_POOL_NAME,
fake.DEST_BACKEND_NAME)
mock_cancel_lun_copy.assert_called_once_with(fake.JOB_UUID)
mock_get_client_for_backend.assert_called_once_with(
fake.DEST_BACKEND_NAME)
fake_lun_path = '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name)
mock_destroy_lun.assert_called_once_with(fake_lun_path)
self.assertIsNone(result)
def test_copy_lun(self):
self.library.configuration.netapp_migrate_volume_timeout = 1
fake_job_status = {'job-status': 'complete'}
mock_start_lun_copy = self.mock_object(self.zapi_client,
'start_lun_copy',
return_value=fake.JOB_UUID)
mock_get_lun_copy_status = self.mock_object(
self.zapi_client, 'get_lun_copy_status',
return_value=fake_job_status)
mock_cancel_lun_copy = self.mock_object(
self.library, '_cancel_lun_copy')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.library._copy_lun(
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
fake.DEST_VSERVER_NAME, dest_lun_name=fake.VOLUME_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True)
mock_start_lun_copy.assert_called_with(
fake_vol.name, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
src_ontap_volume=fake.POOL_NAME, src_vserver=fake.VSERVER_NAME,
dest_lun_name=fake.VOLUME_NAME)
mock_get_lun_copy_status.assert_called_once_with(fake.JOB_UUID)
mock_cancel_lun_copy.assert_not_called()
self.assertIsNone(result)
@ddt.data(('data', na_utils.NetAppDriverTimeout),
('destroyed', na_utils.NetAppDriverException))
@ddt.unpack
def test_copy_lun_error(self, status_on_error, copy_exception):
self.library.configuration.netapp_migrate_volume_timeout = 1
fake_job_status = {
'job-status': status_on_error,
'last-failure-reason': None
}
mock_start_lun_copy = self.mock_object(self.zapi_client,
'start_lun_copy',
return_value=fake.JOB_UUID)
mock_get_lun_copy_status = self.mock_object(
self.zapi_client, 'get_lun_copy_status',
return_value=fake_job_status)
mock_cancel_lun_copy = self.mock_object(
self.library, '_cancel_lun_copy')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(copy_exception,
self.library._copy_lun,
fake_vol,
fake.POOL_NAME,
fake.VSERVER_NAME,
fake.DEST_POOL_NAME,
fake.DEST_VSERVER_NAME,
dest_lun_name=fake.VOLUME_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_start_lun_copy.assert_called_with(
fake_vol.name, fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
src_ontap_volume=fake.POOL_NAME, src_vserver=fake.VSERVER_NAME,
dest_lun_name=fake.VOLUME_NAME)
mock_get_lun_copy_status.assert_called_with(fake.JOB_UUID)
mock_cancel_lun_copy.assert_called_once_with(
fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME)
def test_migrate_volume_to_pool(self):
mock_move_lun = self.mock_object(self.library, '_move_lun')
mock_finish_migrate_volume_to_pool = self.mock_object(
self.library, '_finish_migrate_volume_to_pool')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
updates = self.library._migrate_volume_to_pool(fake_vol,
fake.POOL_NAME,
fake.DEST_POOL_NAME,
fake.VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME,
fake.DEST_POOL_NAME)
mock_finish_migrate_volume_to_pool.assert_called_once_with(
fake_vol, fake.DEST_POOL_NAME)
self.assertEqual({}, updates)
def test_migrate_volume_to_pool_lun_move_error(self):
mock_move_lun = self.mock_object(
self.library, '_move_lun',
side_effect=na_utils.NetAppDriverException)
mock_finish_migrate_volume_to_pool = self.mock_object(
self.library, '_finish_migrate_volume_to_pool')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(na_utils.NetAppDriverException,
self.library._migrate_volume_to_pool,
fake_vol,
fake.POOL_NAME,
fake.DEST_POOL_NAME,
fake.VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME,
fake.DEST_POOL_NAME)
mock_finish_migrate_volume_to_pool.assert_not_called()
def test_migrate_volume_to_pool_lun_move_timeout(self):
mock_move_lun = self.mock_object(
self.library, '_move_lun',
side_effect=na_utils.NetAppDriverTimeout)
mock_finish_migrate_volume_to_pool = self.mock_object(
self.library, '_finish_migrate_volume_to_pool')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
updates = self.library._migrate_volume_to_pool(fake_vol,
fake.POOL_NAME,
fake.DEST_POOL_NAME,
fake.VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_move_lun.assert_called_once_with(fake_vol, fake.POOL_NAME,
fake.DEST_POOL_NAME)
mock_finish_migrate_volume_to_pool.assert_called_once_with(
fake_vol, fake.DEST_POOL_NAME)
self.assertEqual({'status': fields.VolumeStatus.MAINTENANCE}, updates)
def test_finish_migrate_volume_to_pool(self):
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_lun_cache = block_base.NetAppLun(fake.LUN_HANDLE, fake.LUN_NAME,
fake.SIZE, None)
mock_get_lun_from_table = self.mock_object(self.library,
'_get_lun_from_table',
return_value=fake_lun_cache)
self.library._finish_migrate_volume_to_pool(fake_vol,
fake.DEST_POOL_NAME)
mock_get_lun_from_table.assert_called_once_with(fake_vol.name)
expected = {
'Path': '/vol/%s/%s' % (fake.DEST_POOL_NAME, fake_vol.name),
'Volume': fake.DEST_POOL_NAME
}
self.assertEqual(expected, fake_lun_cache.metadata)
def test_migrate_volume_to_vserver(self):
self.library.using_cluster_credentials = True
self.library.backend_name = fake.BACKEND_NAME
mock_create_vserver_peer = self.mock_object(
self.library, 'create_vserver_peer')
mock_copy_lun = self.mock_object(self.library, '_copy_lun')
mock_finish_migrate_volume_to_vserver = self.mock_object(
self.library, '_finish_migrate_volume_to_vserver')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
updates = self.library._migrate_volume_to_vserver(
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['lun_copy'])
mock_copy_lun.assert_called_once_with(
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_migrate_volume_to_vserver.assert_called_once_with(fake_vol)
self.assertEqual({}, updates)
@ddt.data(na_utils.NetAppDriverException, na_utils.NetAppDriverTimeout)
def test_migrate_volume_to_vserver_error_on_copy(self, copy_error):
self.library.using_cluster_credentials = True
self.library.backend_name = fake.BACKEND_NAME
mock_create_vserver_peer = self.mock_object(
self.library, 'create_vserver_peer')
mock_copy_lun = self.mock_object(
self.library, '_copy_lun',
side_effect=copy_error)
mock_finish_migrate_volume_to_vserver = self.mock_object(
self.library, '_finish_migrate_volume_to_vserver')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(copy_error,
self.library._migrate_volume_to_vserver,
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['lun_copy'])
mock_copy_lun.assert_called_once_with(
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
fake.DEST_VSERVER_NAME, dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_migrate_volume_to_vserver.assert_not_called()
def test_migrate_volume_to_vserver_volume_is_not_available(self):
self.library.using_cluster_credentials = True
mock_create_vserver_peer = self.mock_object(
self.library, 'create_vserver_peer')
mock_copy_lun = self.mock_object(self.library, '_copy_lun')
mock_finish_migrate_volume_to_vserver = self.mock_object(
self.library, '_finish_migrate_volume_to_vserver')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.IN_USE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(exception.InvalidVolume,
self.library._migrate_volume_to_vserver,
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_not_called()
mock_copy_lun.assert_not_called()
mock_finish_migrate_volume_to_vserver.assert_not_called()
def test_migrate_volume_to_vserver_invalid_vserver_peer_applications(self):
self.library.using_cluster_credentials = True
self.library.backend_name = fake.VSERVER_NAME
mock_create_vserver_peer = self.mock_object(
self.library, 'create_vserver_peer',
side_effect=na_utils.NetAppDriverException)
mock_copy_lun = self.mock_object(
self.library, '_copy_lun')
mock_finish_migrate_volume_to_vserver = self.mock_object(
self.library, '_finish_migrate_volume_to_vserver')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(na_utils.NetAppDriverException,
self.library._migrate_volume_to_vserver,
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.VSERVER_NAME, fake.DEST_VSERVER_NAME,
['lun_copy'])
mock_copy_lun.assert_not_called()
mock_finish_migrate_volume_to_vserver.assert_not_called()
def test_finish_migrate_volume_to_vserver(self):
mock_delete_volume = self.mock_object(self.library, 'delete_volume')
mock_delete_lun_from_table = self.mock_object(
self.library, '_delete_lun_from_table')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.library._finish_migrate_volume_to_vserver(fake_vol)
mock_delete_volume.assert_called_once_with(fake_vol)
mock_delete_lun_from_table.assert_called_once_with(fake_vol.name)
def test_migrate_volume(self):
ctx = mock.Mock()
self.library.backend_name = fake.BACKEND_NAME
self.library.configuration.netapp_vserver = fake.VSERVER_NAME
mock_migrate_volume_ontap_assisted = self.mock_object(
self.library, 'migrate_volume_ontap_assisted', return_value={})
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
result = self.library.migrate_volume(ctx, fake_vol,
fake.DEST_HOST_STRING)
mock_migrate_volume_ontap_assisted.assert_called_once_with(
fake_vol, fake.DEST_HOST_STRING, fake.BACKEND_NAME,
fake.VSERVER_NAME)
self.assertEqual({}, result)

View File

@@ -25,6 +25,7 @@ from oslo_utils import units
from cinder import exception
from cinder.image import image_utils
from cinder.objects import fields
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as fake
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes as \
@@ -1870,3 +1871,332 @@ class NetAppCmodeNfsDriverTestCase(test.TestCase):
is_fg_clone = self.driver._is_flexgroup_clone_file_supported()
self.assertTrue(is_fg_clone)
def test_copy_file(self):
self.driver.configuration.netapp_migrate_volume_timeout = 1
fake_job_status = {'job-status': 'complete'}
mock_start_file_copy = self.mock_object(self.driver.zapi_client,
'start_file_copy',
return_value=fake.JOB_UUID)
mock_get_file_copy_status = self.mock_object(
self.driver.zapi_client, 'get_file_copy_status',
return_value=fake_job_status)
mock_cancel_file_copy = self.mock_object(
self.driver, '_cancel_file_copy')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.driver._copy_file(
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME, fake.DEST_POOL_NAME,
fake.DEST_VSERVER_NAME, dest_file_name=fake.VOLUME_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME, cancel_on_error=True)
mock_start_file_copy.assert_called_with(
fake_vol.name, fake.DEST_POOL_NAME,
src_ontap_volume=fake.POOL_NAME,
dest_file_name=fake.VOLUME_NAME)
mock_get_file_copy_status.assert_called_with(fake.JOB_UUID)
mock_cancel_file_copy.assert_not_called()
self.assertIsNone(result)
@ddt.data(('data', na_utils.NetAppDriverTimeout),
('destroyed', na_utils.NetAppDriverException))
@ddt.unpack
def test_copy_file_error(self, status_on_error, copy_exception):
self.driver.configuration.netapp_migrate_volume_timeout = 1
fake_job_status = {
'job-status': status_on_error,
'last-failure-reason': None
}
mock_start_file_copy = self.mock_object(self.driver.zapi_client,
'start_file_copy',
return_value=fake.JOB_UUID)
mock_get_file_copy_status = self.mock_object(
self.driver.zapi_client, 'get_file_copy_status',
return_value=fake_job_status)
mock_cancel_file_copy = self.mock_object(
self.driver, '_cancel_file_copy')
ctxt = mock.Mock()
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(copy_exception,
self.driver._copy_file,
fake_vol, fake.POOL_NAME, fake.VSERVER_NAME,
fake.DEST_POOL_NAME, fake.DEST_VSERVER_NAME,
dest_file_name=fake.VOLUME_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_start_file_copy.assert_called_with(
fake_vol.name, fake.DEST_POOL_NAME,
src_ontap_volume=fake.POOL_NAME,
dest_file_name=fake.VOLUME_NAME)
mock_get_file_copy_status.assert_called_with(fake.JOB_UUID)
mock_cancel_file_copy.assert_called_once_with(
fake.JOB_UUID, fake_vol, fake.DEST_POOL_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME)
def test_migrate_volume_to_vserver(self):
self.driver.backend_name = fake.BACKEND_NAME
mock_copy_file = self.mock_object(self.driver, '_copy_file')
mock_create_vserver_peer = self.mock_object(self.driver,
'create_vserver_peer')
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration', return_value={})
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
updates = self.driver._migrate_volume_to_vserver(
fake_vol, fake.NFS_SHARE, fake.VSERVER_NAME, fake.DEST_NFS_SHARE,
fake.DEST_VSERVER_NAME, fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['file_copy'])
mock_finish_volume_migration.assert_called_once_with(
fake_vol, fake.DEST_NFS_SHARE)
self.assertEqual({}, updates)
def test_migrate_volume_create_vserver_peer_error(self):
self.driver.backend_name = fake.BACKEND_NAME
mock_copy_file = self.mock_object(
self.driver, '_copy_file',
side_effect=na_utils.NetAppDriverException)
mock_create_vserver_peer = self.mock_object(
self.driver, 'create_vserver_peer',
side_effect=na_utils.NetAppDriverException)
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(
na_utils.NetAppDriverException,
self.driver._migrate_volume_to_vserver,
fake_vol,
fake.NFS_SHARE,
fake.VSERVER_NAME,
fake.DEST_NFS_SHARE,
fake.DEST_VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['file_copy'])
mock_copy_file.assert_not_called()
mock_finish_volume_migration.assert_not_called()
def test_migrate_volume_to_vserver_file_copy_error(self):
self.driver.backend_name = fake.BACKEND_NAME
mock_create_vserver_peer = self.mock_object(
self.driver, 'create_vserver_peer')
mock_copy_file = self.mock_object(
self.driver, '_copy_file',
side_effect=na_utils.NetAppDriverException)
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(
na_utils.NetAppDriverException,
self.driver._migrate_volume_to_vserver,
fake_vol,
fake.NFS_SHARE,
fake.VSERVER_NAME,
fake.DEST_NFS_SHARE,
fake.DEST_VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['file_copy'])
mock_copy_file.assert_called_once_with(
fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
def test_migrate_volume_to_vserver_file_copy_timeout(self):
self.driver.backend_name = fake.BACKEND_NAME
mock_create_vserver_peer = self.mock_object(
self.driver, 'create_vserver_peer')
mock_copy_file = self.mock_object(
self.driver, '_copy_file',
side_effect=na_utils.NetAppDriverTimeout)
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(
na_utils.NetAppDriverTimeout,
self.driver._migrate_volume_to_vserver,
fake_vol,
fake.NFS_SHARE,
fake.VSERVER_NAME,
fake.DEST_NFS_SHARE,
fake.DEST_VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_create_vserver_peer.assert_called_once_with(
fake.VSERVER_NAME, fake.BACKEND_NAME, fake.DEST_VSERVER_NAME,
['file_copy'])
mock_copy_file.assert_called_once_with(
fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
fake.DEST_EXPORT_PATH[1:], fake.DEST_VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
def test_migrate_volume_to_pool(self):
mock_copy_file = self.mock_object(self.driver, '_copy_file')
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration', return_value={})
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
updates = self.driver._migrate_volume_to_pool(fake_vol,
fake.NFS_SHARE,
fake.DEST_NFS_SHARE,
fake.VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_called_once_with(
fake_vol, fake.DEST_NFS_SHARE)
self.assertEqual({}, updates)
def test_migrate_volume_to_pool_file_copy_error(self):
mock_copy_file = self.mock_object(
self.driver, '_copy_file',
side_effect=na_utils.NetAppDriverException)
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(
na_utils.NetAppDriverException,
self.driver._migrate_volume_to_pool,
fake_vol,
fake.NFS_SHARE,
fake.DEST_NFS_SHARE,
fake.VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
def test_migrate_volume_to_pool_file_copy_timeout(self):
mock_copy_file = self.mock_object(
self.driver, '_copy_file',
side_effect=na_utils.NetAppDriverTimeout)
mock_finish_volume_migration = self.mock_object(
self.driver, '_finish_volume_migration')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID, 'name': fake.VOLUME_NAME}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
self.assertRaises(
na_utils.NetAppDriverTimeout,
self.driver._migrate_volume_to_pool,
fake_vol,
fake.NFS_SHARE,
fake.DEST_NFS_SHARE,
fake.VSERVER_NAME,
fake.DEST_BACKEND_NAME)
mock_copy_file.assert_called_once_with(
fake_vol, fake.EXPORT_PATH[1:], fake.VSERVER_NAME,
fake.DEST_EXPORT_PATH[1:], fake.VSERVER_NAME,
dest_backend_name=fake.DEST_BACKEND_NAME,
cancel_on_error=True)
mock_finish_volume_migration.assert_not_called()
def test_finish_volume_migration(self):
mock_delete_volume = self.mock_object(self.driver, 'delete_volume')
ctxt = mock.Mock()
vol_fields = {'id': fake.VOLUME_ID,
'host': 'fakeHost@%s#%s' % (fake.BACKEND_NAME,
fake.POOL_NAME)}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
result = self.driver._finish_volume_migration(fake_vol,
fake.DEST_POOL_NAME)
mock_delete_volume.assert_called_once_with(fake_vol)
expected = {'provider_location': fake.DEST_POOL_NAME}
self.assertEqual(expected, result)
def test_migrate_volume(self):
ctx = mock.Mock()
self.driver.backend_name = fake.BACKEND_NAME
self.driver.netapp_vserver = fake.VSERVER_NAME
mock_migrate_volume_ontap_assisted = self.mock_object(
self.driver, 'migrate_volume_ontap_assisted', return_value={})
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.AVAILABLE
}
fake_vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
result = self.driver.migrate_volume(ctx, fake_vol,
fake.DEST_HOST_STRING)
mock_migrate_volume_ontap_assisted.assert_called_once_with(
fake_vol, fake.DEST_HOST_STRING, fake.BACKEND_NAME,
fake.VSERVER_NAME)
self.assertEqual({}, result)
def test_migrate_volume_not_in_available_status(self):
ctx = mock.Mock()
self.driver.backend_name = fake.BACKEND_NAME
self.driver.netapp_vserver = fake.VSERVER_NAME
mock_migrate_volume_ontap_assisted = self.mock_object(
self.driver, 'migrate_volume_ontap_assisted', return_value={})
vol_fields = {
'id': fake.VOLUME_ID,
'name': fake.VOLUME_NAME,
'status': fields.VolumeStatus.IN_USE
}
fake_vol = fake_volume.fake_volume_obj(ctx, **vol_fields)
migrated, updates = self.driver.migrate_volume(ctx,
fake_vol,
fake.DEST_HOST_STRING)
mock_migrate_volume_ontap_assisted.assert_not_called()
self.assertFalse(migrated)
self.assertEqual({}, updates)
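
The last two tests pin down the driver-facing contract: migrate_volume
returns a (migrated, model_update) pair, and the NFS path only attempts
assisted migration for volumes in 'available' status. A hedged sketch of
that guard (illustrative wrapper; the real logic lives in the ONTAP NFS
driver's migrate_volume):

def migrate_volume(driver, context, volume, dest_host):
    # NFS storage assisted migration is always disruptive, so anything
    # not 'available' defers to Cinder's generic host assisted path.
    if volume['status'] != 'available':
        return False, {}
    return driver.migrate_volume_ontap_assisted(
        volume, dest_host, driver.backend_name, driver.netapp_vserver)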

View File

@@ -17,7 +17,10 @@ from unittest import mock
import ddt
from oslo_config import cfg
from cinder.tests.unit import fake_volume
from cinder.tests.unit import test
from cinder.tests.unit.volume.drivers.netapp.dataontap import fakes as \
dataontap_fakes
from cinder.tests.unit.volume.drivers.netapp.dataontap.utils import fakes
from cinder.volume import configuration
from cinder.volume import driver
@@ -500,6 +503,85 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
self.src_vserver, self.src_flexvol_name,
self.dest_vserver, self.dest_flexvol_name)
def test_create_vserver_peer(self):
mock_get_client_for_backend = self.mock_object(
utils, 'get_client_for_backend')
get_vserver_peer_response = []
mock_get_vserver_peers = mock_get_client_for_backend.return_value.\
get_vserver_peers
mock_get_vserver_peers.return_value = get_vserver_peer_response
mock_create_vserver_peer = mock_get_client_for_backend.return_value.\
create_vserver_peer
mock_create_vserver_peer.return_value = None
peer_applications = ['snapmirror']
result = self.dm_mixin.create_vserver_peer(
dataontap_fakes.VSERVER_NAME, self.src_backend,
dataontap_fakes.DEST_VSERVER_NAME, peer_applications)
mock_get_vserver_peers.assert_called_once_with(
dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME)
mock_create_vserver_peer.assert_called_once_with(
dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME,
vserver_peer_application=peer_applications)
self.assertIsNone(result)
def test_create_vserver_peer_already_exists(self):
mock_get_client_for_backend = self.mock_object(
utils, 'get_client_for_backend')
get_vserver_peer_response = [{
'vserver': dataontap_fakes.VSERVER_NAME,
'peer-vserver': dataontap_fakes.DEST_VSERVER_NAME,
'peer-state': 'peered',
'peer-cluster': dataontap_fakes.CLUSTER_NAME,
'applications': ['snapmirror']
}]
mock_get_vserver_peers = mock_get_client_for_backend.return_value. \
get_vserver_peers
mock_get_vserver_peers.return_value = get_vserver_peer_response
mock_create_vserver_peer = mock_get_client_for_backend.return_value. \
create_vserver_peer
mock_create_vserver_peer.return_value = None
peer_applications = ['snapmirror']
result = self.dm_mixin.create_vserver_peer(
dataontap_fakes.VSERVER_NAME, self.src_backend,
dataontap_fakes.DEST_VSERVER_NAME, peer_applications)
mock_get_vserver_peers.assert_called_once_with(
dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME)
mock_create_vserver_peer.assert_not_called()
self.assertIsNone(result)
def test_create_vserver_peer_application_not_defined(self):
mock_get_client_for_backend = self.mock_object(
utils, 'get_client_for_backend')
get_vserver_peer_response = [{
'vserver': dataontap_fakes.VSERVER_NAME,
'peer-vserver': dataontap_fakes.DEST_VSERVER_NAME,
'peer-state': 'peered',
'peer-cluster': dataontap_fakes.CLUSTER_NAME,
'applications': ['snapmirror']
}]
mock_get_vserver_peers = mock_get_client_for_backend.return_value. \
get_vserver_peers
mock_get_vserver_peers.return_value = get_vserver_peer_response
mock_create_vserver_peer = mock_get_client_for_backend.return_value. \
create_vserver_peer
mock_create_vserver_peer.return_value = None
peer_applications = ['not a snapmirror application']
self.assertRaises(na_utils.NetAppDriverException,
self.dm_mixin.create_vserver_peer,
dataontap_fakes.VSERVER_NAME,
self.src_backend,
dataontap_fakes.DEST_VSERVER_NAME,
peer_applications)
mock_get_vserver_peers.assert_called_once_with(
dataontap_fakes.VSERVER_NAME, dataontap_fakes.DEST_VSERVER_NAME)
mock_create_vserver_peer.assert_not_called()
def test_quiesce_then_abort_wait_for_quiesced(self):
self.mock_object(time, 'sleep')
self.mock_object(self.mock_dest_client, 'get_snapmirrors',
@@ -981,3 +1063,218 @@ class NetAppCDOTDataMotionMixinTestCase(test.TestCase):
self.assertEqual(expected_active_backend_name,
actual_active_backend_name)
self.assertEqual(expected_volume_updates, actual_volume_updates)
def test_migrate_volume_ontap_assisted_is_same_pool(self):
ctxt = mock.Mock()
vol_fields = {'id': dataontap_fakes.VOLUME_ID,
'host': dataontap_fakes.HOST_STRING}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_dest_host = {'host': dataontap_fakes.HOST_STRING}
self.dm_mixin._migrate_volume_to_pool = mock.Mock()
mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool
self.dm_mixin._migrate_volume_to_vserver = mock.Mock()
mock_migrate_volume_to_vserver = (
self.dm_mixin._migrate_volume_to_vserver)
migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted(
fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME,
dataontap_fakes.DEST_VSERVER_NAME)
mock_migrate_volume_to_pool.assert_not_called()
mock_migrate_volume_to_vserver.assert_not_called()
self.assertTrue(migrated)
self.assertEqual({}, updates)
def test_migrate_volume_ontap_assisted_same_pool_different_backend(self):
CONF.set_override('netapp_vserver', dataontap_fakes.DEST_VSERVER_NAME,
group=self.dest_backend)
ctxt = mock.Mock()
vol_fields = {'id': dataontap_fakes.VOLUME_ID,
'host': dataontap_fakes.HOST_STRING}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_dest_host = {'host': '%s@%s#%s' % (
dataontap_fakes.HOST_NAME,
dataontap_fakes.DEST_BACKEND_NAME,
dataontap_fakes.POOL_NAME)}
self.dm_mixin.using_cluster_credentials = True
self.mock_src_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.mock_dest_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.dm_mixin._migrate_volume_to_pool = mock.Mock()
mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool
self.dm_mixin._migrate_volume_to_vserver = mock.Mock()
mock_migrate_volume_to_vserver = (
self.dm_mixin._migrate_volume_to_vserver)
migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted(
fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME,
dataontap_fakes.DEST_VSERVER_NAME)
utils.get_backend_configuration.assert_called_once_with(
dataontap_fakes.DEST_BACKEND_NAME)
utils.get_client_for_backend.assert_has_calls(
[mock.call(dataontap_fakes.DEST_BACKEND_NAME),
mock.call(dataontap_fakes.BACKEND_NAME)])
self.mock_src_client.get_cluster_name.assert_called()
self.mock_dest_client.get_cluster_name.assert_called()
mock_migrate_volume_to_pool.assert_not_called()
mock_migrate_volume_to_vserver.assert_not_called()
self.assertTrue(migrated)
self.assertEqual({}, updates)
def test_migrate_volume_ontap_assisted_invalid_creds(self):
ctxt = mock.Mock()
vol_fields = {'id': dataontap_fakes.VOLUME_ID,
'host': dataontap_fakes.HOST_STRING}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_dest_host = {'host': dataontap_fakes.DEST_HOST_STRING}
self.dm_mixin.using_cluster_credentials = False
self.mock_dest_config.netapp_vserver = dataontap_fakes.VSERVER_NAME
self.dm_mixin._migrate_volume_to_pool = mock.Mock()
mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool
self.dm_mixin._migrate_volume_to_vserver = mock.Mock()
mock_migrate_volume_to_vserver = (
self.dm_mixin._migrate_volume_to_vserver)
migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted(
fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME,
dataontap_fakes.DEST_VSERVER_NAME)
utils.get_backend_configuration.assert_not_called()
utils.get_client_for_backend.assert_not_called()
self.mock_src_client.get_cluster_name.assert_not_called()
self.mock_dest_client.get_cluster_name.assert_not_called()
mock_migrate_volume_to_pool.assert_not_called()
mock_migrate_volume_to_vserver.assert_not_called()
self.assertFalse(migrated)
self.assertEqual({}, updates)
def test_migrate_volume_ontap_assisted_dest_pool_not_in_same_cluster(self):
CONF.set_override('netapp_vserver', dataontap_fakes.DEST_VSERVER_NAME,
group=self.dest_backend)
ctxt = mock.Mock()
vol_fields = {'id': dataontap_fakes.VOLUME_ID,
'host': dataontap_fakes.HOST_STRING}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_dest_host = {'host': dataontap_fakes.DEST_HOST_STRING}
self.dm_mixin.using_cluster_credentials = True
self.mock_src_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.mock_dest_client.get_cluster_name.return_value = (
dataontap_fakes.DEST_CLUSTER_NAME)
self.dm_mixin._migrate_volume_to_pool = mock.Mock()
mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool
self.dm_mixin._migrate_volume_to_vserver = mock.Mock()
mock_migrate_volume_to_vserver = (
self.dm_mixin._migrate_volume_to_vserver)
migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted(
fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME,
dataontap_fakes.DEST_VSERVER_NAME)
utils.get_backend_configuration.assert_called_once_with(
dataontap_fakes.DEST_BACKEND_NAME)
utils.get_client_for_backend.assert_has_calls(
[mock.call(dataontap_fakes.DEST_BACKEND_NAME),
mock.call(dataontap_fakes.BACKEND_NAME)])
self.mock_src_client.get_cluster_name.assert_called()
self.mock_dest_client.get_cluster_name.assert_called()
mock_migrate_volume_to_pool.assert_not_called()
mock_migrate_volume_to_vserver.assert_not_called()
self.assertFalse(migrated)
self.assertEqual({}, updates)
@ddt.data((dataontap_fakes.BACKEND_NAME, True),
(dataontap_fakes.DEST_BACKEND_NAME, False))
@ddt.unpack
def test_migrate_volume_ontap_assisted_same_vserver(self,
dest_backend_name,
is_same_backend):
CONF.set_override('netapp_vserver', dataontap_fakes.VSERVER_NAME,
group=self.dest_backend)
ctxt = mock.Mock()
vol_fields = {'id': dataontap_fakes.VOLUME_ID,
'host': dataontap_fakes.HOST_STRING}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_dest_host = {'host': '%s@%s#%s' % (
dataontap_fakes.HOST_NAME,
dest_backend_name,
dataontap_fakes.DEST_POOL_NAME)}
self.dm_mixin.using_cluster_credentials = True
self.mock_src_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.mock_dest_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.dm_mixin._migrate_volume_to_pool = mock.Mock()
mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool
mock_migrate_volume_to_pool.return_value = {}
self.dm_mixin._migrate_volume_to_vserver = mock.Mock()
mock_migrate_volume_to_vserver = (
self.dm_mixin._migrate_volume_to_vserver)
migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted(
fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME,
dataontap_fakes.VSERVER_NAME)
if is_same_backend:
utils.get_backend_configuration.assert_not_called()
utils.get_client_for_backend.assert_not_called()
self.mock_src_client.get_cluster_name.assert_not_called()
self.mock_dest_client.get_cluster_name.assert_not_called()
else:
utils.get_backend_configuration.assert_called_once_with(
dest_backend_name)
utils.get_client_for_backend.assert_has_calls(
[mock.call(dest_backend_name),
mock.call(dataontap_fakes.BACKEND_NAME)])
self.mock_src_client.get_cluster_name.assert_called()
self.mock_dest_client.get_cluster_name.assert_called()
mock_migrate_volume_to_pool.assert_called_once_with(
fake_vol, dataontap_fakes.POOL_NAME,
dataontap_fakes.DEST_POOL_NAME,
dataontap_fakes.VSERVER_NAME,
dest_backend_name)
mock_migrate_volume_to_vserver.assert_not_called()
self.assertTrue(migrated)
self.assertEqual({}, updates)
def test_migrate_volume_different_vserver(self):
CONF.set_override('netapp_vserver', dataontap_fakes.DEST_VSERVER_NAME,
group=self.dest_backend)
ctxt = mock.Mock()
vol_fields = {'id': dataontap_fakes.VOLUME_ID,
'host': dataontap_fakes.HOST_STRING}
fake_vol = fake_volume.fake_volume_obj(ctxt, **vol_fields)
fake_dest_host = {'host': dataontap_fakes.DEST_HOST_STRING}
self.dm_mixin.using_cluster_credentials = True
self.mock_src_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.mock_dest_client.get_cluster_name.return_value = (
dataontap_fakes.CLUSTER_NAME)
self.dm_mixin._migrate_volume_to_pool = mock.Mock()
mock_migrate_volume_to_pool = self.dm_mixin._migrate_volume_to_pool
self.dm_mixin._migrate_volume_to_vserver = mock.Mock()
mock_migrate_volume_to_vserver = (
self.dm_mixin._migrate_volume_to_vserver)
mock_migrate_volume_to_vserver.return_value = {}
migrated, updates = self.dm_mixin.migrate_volume_ontap_assisted(
fake_vol, fake_dest_host, dataontap_fakes.BACKEND_NAME,
dataontap_fakes.VSERVER_NAME)
utils.get_backend_configuration.assert_called_once_with(
dataontap_fakes.DEST_BACKEND_NAME)
utils.get_client_for_backend.assert_has_calls(
[mock.call(dataontap_fakes.DEST_BACKEND_NAME),
mock.call(dataontap_fakes.BACKEND_NAME)])
self.mock_src_client.get_cluster_name.assert_called()
self.mock_dest_client.get_cluster_name.assert_called()
mock_migrate_volume_to_pool.assert_not_called()
mock_migrate_volume_to_vserver.assert_called_once_with(
fake_vol, dataontap_fakes.POOL_NAME, dataontap_fakes.VSERVER_NAME,
dataontap_fakes.DEST_POOL_NAME, dataontap_fakes.DEST_VSERVER_NAME,
dataontap_fakes.DEST_BACKEND_NAME)
self.assertTrue(migrated)
self.assertEqual({}, updates)
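
Rounding out this file: the create_vserver_peer tests above fix a
three-step flow (look up existing peers, create one only when absent,
fail when an existing peer lacks the requested application). A compact
sketch of that flow under assumed names (the real method is
create_vserver_peer in the data motion mixin, and it raises a
driver-specific exception rather than RuntimeError):

def ensure_vserver_peer(client, src_vserver, dest_vserver, applications):
    # Reuse an existing peer relationship when one is already in place.
    peers = client.get_vserver_peers(src_vserver, dest_vserver)
    if not peers:
        client.create_vserver_peer(src_vserver, dest_vserver,
                                   vserver_peer_application=applications)
        return
    # An existing peer must already allow every requested application.
    for app in applications:
        if app not in peers[0].get('applications', []):
            raise RuntimeError('vserver peer exists but lacks '
                               'application %s' % app)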

View File

@@ -503,6 +503,11 @@ class NetAppBlockStorageLibrary(object):
self.zapi_client.add_igroup_initiator(igroup_name, initiator)
return igroup_name
def _delete_lun_from_table(self, name):
"""Deletes LUN from cache table."""
if self.lun_table.get(name, None):
self.lun_table.pop(name)
def _add_lun_to_table(self, lun):
"""Adds LUN to cache table."""
if not isinstance(lun, NetAppLun):

View File

@@ -25,6 +25,8 @@ Volume driver library for NetApp C-mode block storage systems.
"""
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
import six
@@ -57,10 +59,11 @@ class NetAppBlockStorageCmodeLibrary(block_base.NetAppBlockStorageLibrary,
1.0.0 - Driver development before Wallaby
2.0.0 - Add support for QoS minimums specs
Add support for dynamic Adaptive QoS policy group creation
3.0.0 - Add support for intra-cluster storage assisted volume migration
"""
VERSION = "2.0.0"
VERSION = "3.0.0"
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
@@ -605,3 +608,204 @@
volume_model_updates.append(volume_model_update)
return None, volume_model_updates
def _move_lun(self, volume, src_ontap_volume, dest_ontap_volume,
dest_lun_name=None):
"""Moves LUN from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_lun_move(
volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
dest_lun_name=dest_lun_name)
LOG.debug('Start moving LUN %s from %s to %s. '
'Job UUID is %s.', volume.name, src_ontap_volume,
dest_ontap_volume, job_uuid)
def _wait_lun_move_complete():
move_status = self.zapi_client.get_lun_move_status(job_uuid)
LOG.debug('Waiting for LUN move job %s to complete. '
'Current status is: %s.', job_uuid,
move_status['job-status'])
if not move_status:
status_error_msg = (_("Error moving LUN %s. The "
"corresponding Job UUID % doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
elif move_status['job-status'] == 'destroyed':
status_error_msg = (_('Error moving LUN %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
move_status['last-failure-reason']))
elif move_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_lun_move_complete)
timer.start(
interval=15,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except loopingcall.LoopingCallTimeOut:
msg = (_('Timeout waiting to complete move operation of LUN %s.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
def _cancel_lun_copy(self, job_uuid, volume, dest_pool, dest_backend_name):
"""Cancel an on-going lun copy operation."""
try:
# NOTE(sfernand): Another approach would be first checking if
# the copy operation isn't in `destroying` or `destroyed` states
# before issuing cancel.
self.zapi_client.cancel_lun_copy(job_uuid)
except na_utils.NetAppDriverException:
dest_client = dot_utils.get_client_for_backend(dest_backend_name)
lun_path = '/vol/%s/%s' % (dest_pool, volume.name)
try:
dest_client.destroy_lun(lun_path)
except Exception:
LOG.warning('Error cleaning up LUN %s in the destination '
'volume. Verify whether it still exists in volume '
'%s and delete it manually to avoid unused '
'resources.', lun_path, dest_pool)
def _copy_lun(self, volume, src_ontap_volume, src_vserver,
dest_ontap_volume, dest_vserver, dest_lun_name=None,
dest_backend_name=None, cancel_on_error=False):
"""Copies LUN from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_lun_copy(
volume.name, dest_ontap_volume, dest_vserver,
src_ontap_volume=src_ontap_volume, src_vserver=src_vserver,
dest_lun_name=dest_lun_name)
LOG.debug('Start copying LUN %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.',
{'vol': volume.name, 'src_vserver': src_vserver,
'src_ontap_vol': src_ontap_volume,
'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_ontap_volume,
'job': job_uuid})
def _wait_lun_copy_complete():
copy_status = self.zapi_client.get_lun_copy_status(job_uuid)
if not copy_status:
status_error_msg = (_("Error copying LUN %s. The "
"corresponding job UUID %s doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
LOG.debug('Waiting for LUN copy job %s to complete. Current '
'status is: %s.', job_uuid, copy_status['job-status'])
if copy_status['job-status'] == 'destroyed':
status_error_msg = (_('Error copying LUN %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
copy_status['last-failure-reason']))
elif copy_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_lun_copy_complete)
timer.start(
interval=10,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
if cancel_on_error:
self._cancel_lun_copy(job_uuid, volume, dest_ontap_volume,
dest_backend_name=dest_backend_name)
if isinstance(e, loopingcall.LoopingCallTimeOut):
ctxt.reraise = False
msg = (_('Timeout waiting for volume %s to complete '
'migration.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
def _finish_migrate_volume_to_vserver(self, src_volume):
"""Finish volume migration to another vserver within the cluster."""
# The source volume can be safely deleted after a successful migration.
self.delete_volume(src_volume)
# LUN cache for current backend can be deleted after migration.
self._delete_lun_from_table(src_volume.name)
def _migrate_volume_to_vserver(self, volume, src_pool, src_vserver,
dest_pool, dest_vserver, dest_backend_name):
"""Migrate volume to a another vserver within the same cluster."""
LOG.info('Migrating volume %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.id, 'src_vserver': src_vserver,
'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_pool})
# NOTE(sfernand): Migrating to a different vserver relies on copy
# operations, which are always disruptive as they require the
# destination volume to be attached to the Nova instance as a new
# block device. This differs from migrating volumes within the
# same vserver, where a LUN move operation can be used without
# changing the iSCSI target.
if volume.status != fields.VolumeStatus.AVAILABLE:
msg = _("Volume status must be 'available' in order to "
"migrate volume to another vserver.")
LOG.error(msg)
raise exception.InvalidVolume(reason=msg)
vserver_peer_application = 'lun_copy'
self.create_vserver_peer(src_vserver, self.backend_name, dest_vserver,
[vserver_peer_application])
self._copy_lun(volume, src_pool, src_vserver, dest_pool,
dest_vserver, dest_backend_name=dest_backend_name,
cancel_on_error=True)
self._finish_migrate_volume_to_vserver(volume)
LOG.info('Successfully migrated volume %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s '
'to %(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.id, 'src_vserver': src_vserver,
'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_pool})
# No model updates are necessary, so return an empty dict.
return {}
def _finish_migrate_volume_to_pool(self, src_volume, dest_pool):
"""Finish volume migration to another pool within the same vserver."""
# LUN cache must be updated with new path and volume information.
lun = self._get_lun_from_table(src_volume.name)
new_lun_path = '/vol/%s/%s' % (dest_pool, src_volume.name)
lun.metadata['Path'] = new_lun_path
lun.metadata['Volume'] = dest_pool
def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver,
dest_backend_name):
"""Migrate volume to another Cinder Pool within the same vserver."""
LOG.info('Migrating volume %(vol)s from pool %(src)s to '
'%(dest)s within vserver %(vserver)s.',
{'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
'vserver': vserver})
updates = {}
try:
self._move_lun(volume, src_pool, dest_pool)
except na_utils.NetAppDriverTimeout:
error_msg = (_('Timeout waiting for volume %s to complete '
'migration. Volume status is set to maintenance '
'to prevent performing operations with this '
'volume. Check the migration status on the '
'storage side and set the volume status manually '
'if the migration succeeded.'))
LOG.warning(error_msg, volume.id)
updates['status'] = fields.VolumeStatus.MAINTENANCE
except na_utils.NetAppDriverException as e:
error_msg = (_('Failed to migrate volume %(vol)s from pool '
'%(src)s to %(dest)s. %(err)s'))
raise na_utils.NetAppDriverException(
error_msg % {'vol': volume.id, 'src': src_pool,
'dest': dest_pool, 'err': e})
self._finish_migrate_volume_to_pool(volume, dest_pool)
LOG.info('Successfully migrated volume %(vol)s from pool %(src)s '
'to %(dest)s within vserver %(vserver)s.',
{'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
'vserver': vserver})
return updates
def migrate_volume(self, context, volume, host):
"""Migrate Cinder volume to the specified pool or vserver."""
return self.migrate_volume_ontap_assisted(
volume, host, self.backend_name, self.configuration.netapp_vserver)
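The `_move_lun` and `_copy_lun` helpers above share a common oslo.service polling pattern. Below is a minimal standalone sketch of that pattern, with a hypothetical `job_finished()` check standing in for the ZAPI job status queries:

from oslo_service import loopingcall


def job_finished():
    # Hypothetical status check standing in for get_lun_move_status()
    # or get_lun_copy_status().
    return True


def _poll():
    # Raising LoopingCallDone ends the loop successfully; a plain
    # return schedules another run after `interval` seconds.
    if job_finished():
        raise loopingcall.LoopingCallDone()


timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(_poll)
try:
    # Poll every 15 seconds; LoopingCallTimeOut is raised after 3600s.
    timer.start(interval=15, timeout=3600).wait()
except loopingcall.LoopingCallTimeOut:
    print('Job did not complete within the timeout.')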

View File

@ -611,6 +611,169 @@ class Client(client_base.Client):
clone_create.add_child_elem(block_ranges)
self.connection.invoke_successfully(clone_create, True)
def start_file_copy(self, file_name, dest_ontap_volume,
src_ontap_volume=None,
dest_file_name=None):
"""Starts a file copy operation between ONTAP volumes."""
if src_ontap_volume is None:
src_ontap_volume = dest_ontap_volume
if dest_file_name is None:
dest_file_name = file_name
api_args = {
'source-paths': [{
'sfod-operation-path': '%s/%s' % (src_ontap_volume,
file_name)
}],
'destination-paths': [{
'sfod-operation-path': '%s/%s' % (dest_ontap_volume,
dest_file_name),
}],
}
result = self.connection.send_request('file-copy-start', api_args,
enable_tunneling=False)
return result.get_child_content('job-uuid')
def destroy_file_copy(self, job_uuid):
"""Cancel/Destroy a in-progress file copy."""
api_args = {
'job-uuid': job_uuid,
'file-index': 0
}
try:
self.connection.send_request('file-copy-destroy', api_args,
enable_tunneling=False)
except netapp_api.NaApiError as e:
msg = (_('Could not cancel file copy for job UUID %s. %s'))
raise na_utils.NetAppDriverException(msg % (job_uuid, e))
def get_file_copy_status(self, job_uuid):
"""Get file copy job status from a given job's UUID."""
api_args = {
'query': {
'file-copy-info': {
'job-uuid': job_uuid
}
}
}
result = self.connection.send_request('file-copy-get-iter', api_args,
enable_tunneling=False)
file_copy_info_list = result.get_child_by_name('attributes-list')
if file_copy_info_list:
file_copy_info = file_copy_info_list.get_children()[0]
copy_status = {
'job-status':
file_copy_info.get_child_content('scanner-status'),
'last-failure-reason':
file_copy_info.get_child_content('last-failure-reason')
}
return copy_status
return None
def start_lun_copy(self, lun_name, dest_ontap_volume, dest_vserver,
src_ontap_volume=None, src_vserver=None,
dest_lun_name=None):
"""Starts a lun copy operation between ONTAP volumes."""
if src_ontap_volume is None:
src_ontap_volume = dest_ontap_volume
if src_vserver is None:
src_vserver = dest_vserver
if dest_lun_name is None:
dest_lun_name = lun_name
api_args = {
'source-vserver': src_vserver,
'destination-vserver': dest_vserver,
'paths': [{
'lun-path-pair': {
'destination-path': '/vol/%s/%s' % (dest_ontap_volume,
dest_lun_name),
'source-path': '/vol/%s/%s' % (src_ontap_volume,
lun_name)}
}],
}
result = self.connection.send_request('lun-copy-start', api_args,
enable_tunneling=False)
return result.get_child_content('job-uuid')
def cancel_lun_copy(self, job_uuid):
"""Cancel an in-progress lun copy."""
api_args = {
'job-uuid': job_uuid
}
try:
self.connection.send_request('lun-copy-cancel', api_args,
enable_tunneling=False)
except netapp_api.NaApiError as e:
msg = (_('Could not cancel LUN copy for job UUID %s. %s'))
raise na_utils.NetAppDriverException(msg % (job_uuid, e))
def get_lun_copy_status(self, job_uuid):
"""Get lun copy job status from a given job's UUID."""
api_args = {
'query': {
'lun-copy-info': {
'job-uuid': job_uuid
}
}
}
result = self.connection.send_request('lun-copy-get-iter', api_args,
enable_tunneling=False)
lun_copy_info_list = result.get_child_by_name('attributes-list')
if lun_copy_info_list:
lun_copy_info = lun_copy_info_list.get_children()[0]
copy_status = {
'job-status':
lun_copy_info.get_child_content('job-status'),
'last-failure-reason':
lun_copy_info.get_child_content('last-failure-reason')
}
return copy_status
return None
def start_lun_move(self, lun_name, dest_ontap_volume,
src_ontap_volume=None, dest_lun_name=None):
"""Starts a lun move operation between ONTAP volumes."""
if dest_lun_name is None:
dest_lun_name = lun_name
if src_ontap_volume is None:
src_ontap_volume = dest_ontap_volume
api_args = {
'paths': [{
'lun-path-pair': {
'destination-path': '/vol/%s/%s' % (dest_ontap_volume,
dest_lun_name),
'source-path': '/vol/%s/%s' % (src_ontap_volume,
lun_name)}
}]
}
result = self.connection.send_request('lun-move-start', api_args)
return result.get_child_content('job-uuid')
def get_lun_move_status(self, job_uuid):
"""Get lun move job status from a given job's UUID."""
api_args = {
'query': {
'lun-move-info': {
'job-uuid': job_uuid
}
}
}
result = self.connection.send_request('lun-move-get-iter', api_args)
lun_move_info_list = result.get_child_by_name('attributes-list')
if lun_move_info_list:
lun_move_info = lun_move_info_list.get_children()[0]
move_status = {
'job-status':
lun_move_info.get_child_content('job-status'),
'last-failure-reason':
lun_move_info.get_child_content('last-failure-reason')
}
return move_status
return None
def get_lun_by_args(self, **args):
"""Retrieves LUN with specified args."""
lun_iter = netapp_api.NaElement('lun-get-iter')
@ -2030,6 +2193,22 @@ class Client(client_base.Client):
msg_args = {'snap': snapshot_name, 'vol': volume_name}
raise exception.VolumeBackendAPIException(data=msg % msg_args)
def get_cluster_name(self):
"""Gets cluster name."""
api_args = {
'desired-attributes': {
'cluster-identity-info': {
'cluster-name': None,
}
}
}
result = self.connection.send_request('cluster-identity-get', api_args,
enable_tunneling=False)
attributes = result.get_child_by_name('attributes')
cluster_identity = attributes.get_child_by_name(
'cluster-identity-info')
return cluster_identity.get_child_content('cluster-name')
def create_cluster_peer(self, addresses, username=None, password=None,
passphrase=None):
"""Creates a cluster peer relationship."""
@ -2160,16 +2339,24 @@ class Client(client_base.Client):
self.connection.send_request('cluster-peer-policy-modify', api_args)
def create_vserver_peer(self, vserver_name, peer_vserver_name):
"""Creates a Vserver peer relationship for SnapMirrors."""
def create_vserver_peer(self, vserver_name, peer_vserver_name,
vserver_peer_application=None):
"""Creates a Vserver peer relationship."""
# Default to the `snapmirror` peering application if none is
# specified.
if not vserver_peer_application:
vserver_peer_application = ['snapmirror']
api_args = {
'vserver': vserver_name,
'peer-vserver': peer_vserver_name,
'applications': [
{'vserver-peer-application': 'snapmirror'},
{'vserver-peer-application': app}
for app in vserver_peer_application
],
}
self.connection.send_request('vserver-peer-create', api_args)
self.connection.send_request('vserver-peer-create', api_args,
enable_tunneling=False)
def delete_vserver_peer(self, vserver_name, peer_vserver_name):
"""Deletes a Vserver peer relationship."""
@ -2196,7 +2383,8 @@ class Client(client_base.Client):
api_args['query']['vserver-peer-info']['peer-vserver'] = (
peer_vserver_name)
result = self.send_iter_request('vserver-peer-get-iter', api_args)
result = self.send_iter_request('vserver-peer-get-iter', api_args,
enable_tunneling=False)
if not self._has_records(result):
return []
@ -2213,6 +2401,9 @@ class Client(client_base.Client):
vserver_peer_info.get_child_content('peer-state'),
'peer-cluster':
vserver_peer_info.get_child_content('peer-cluster'),
'applications': [app.get_content() for app in
vserver_peer_info.get_child_by_name(
'applications').get_children()],
}
vserver_peers.append(vserver_peer)
@ -2373,7 +2564,7 @@ class Client(client_base.Client):
def delete_snapmirror(self, source_vserver, source_volume,
destination_vserver, destination_volume):
"""Destroys a SnapMirror relationship."""
"""Destroys an SnapMirror relationship."""
self._ensure_snapmirror_v2()
api_args = {

View File

@ -26,7 +26,19 @@ from cinder.zonemanager import utils as fczm_utils
@interface.volumedriver
class NetAppCmodeFibreChannelDriver(driver.BaseVD,
driver.ManageableVD):
"""NetApp C-mode FibreChannel volume driver."""
"""NetApp C-mode FibreChannel volume driver.
Version history:
.. code-block:: none
1.0.0 - Driver development before Wallaby
2.0.0 - Wallaby driver version bump
3.0.0 - Add support for intra-cluster storage assisted volume migration
"""
VERSION = "3.0.0"
DRIVER_NAME = 'NetApp_FibreChannel_Cluster_direct'
@ -140,3 +152,6 @@ class NetAppCmodeFibreChannelDriver(driver.BaseVD,
def failover_host(self, context, volumes, secondary_id=None, groups=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
def migrate_volume(self, context, volume, host):
return self.library.migrate_volume(context, volume, host)

View File

@ -135,3 +135,6 @@ class NetAppCmodeISCSIDriver(driver.BaseVD,
def failover_host(self, context, volumes, secondary_id=None, groups=None):
return self.library.failover_host(
context, volumes, secondary_id=secondary_id)
def migrate_volume(self, context, volume, host):
return self.library.migrate_volume(context, volume, host)

View File

@ -25,6 +25,7 @@ import os
import uuid
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
import six
@ -61,10 +62,11 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
2.0.0 - Add support for QoS minimums specs
Add support for dynamic Adaptive QoS policy group creation
Implement FlexGroup pool
3.0.0 - Add support for intra-cluster storage assisted volume migration
"""
VERSION = "2.0.0"
VERSION = "3.0.0"
REQUIRED_CMODE_FLAGS = ['netapp_vserver']
@ -984,3 +986,150 @@ class NetAppCmodeNfsDriver(nfs_base.NetAppNfsDriver,
def _is_flexgroup_clone_file_supported(self):
"""Check whether storage can perform clone file for FlexGroup"""
return self.zapi_client.features.FLEXGROUP_CLONE_FILE
def _cancel_file_copy(self, job_uuid, volume, dest_pool,
dest_backend_name=None):
"""Cancel an on-going file copy operation."""
try:
# NOTE(sfernand): Another approach would be first checking if
# the copy operation isn't in `destroying` or `destroyed` states
# before issuing cancel.
self.zapi_client.destroy_file_copy(job_uuid)
except na_utils.NetAppDriverException:
dest_client = dot_utils.get_client_for_backend(dest_backend_name)
file_path = '%s/%s' % (dest_pool, volume.name)
try:
dest_client.delete_file(file_path)
except Exception:
LOG.warning('Error cleaning up file %s in the destination '
'volume. Verify whether it still exists in volume '
'%s and delete it manually to avoid unused '
'resources.', file_path, dest_pool)
def _copy_file(self, volume, src_ontap_volume, src_vserver,
dest_ontap_volume, dest_vserver, dest_file_name=None,
dest_backend_name=None, cancel_on_error=False):
"""Copies file from an ONTAP volume to another."""
job_uuid = self.zapi_client.start_file_copy(
volume.name, dest_ontap_volume, src_ontap_volume=src_ontap_volume,
dest_file_name=dest_file_name)
LOG.debug('Start copying file %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s. Job UUID is %(job)s.',
{'vol': volume.name, 'src_vserver': src_vserver,
'src_ontap_vol': src_ontap_volume,
'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_ontap_volume,
'job': job_uuid})
def _wait_file_copy_complete():
copy_status = self.zapi_client.get_file_copy_status(job_uuid)
if not copy_status:
status_error_msg = (_("Error copying file %s. The "
"corresponding job UUID %s doesn't "
"exist."))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id, job_uuid))
LOG.debug('Waiting for file copy job %s to complete. Current '
'status is: %s.', job_uuid, copy_status['job-status'])
if copy_status['job-status'] == 'destroyed':
status_error_msg = (_('Error copying file %s. %s.'))
raise na_utils.NetAppDriverException(
status_error_msg % (volume.id,
copy_status['last-failure-reason']))
elif copy_status['job-status'] == 'complete':
raise loopingcall.LoopingCallDone()
try:
timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
_wait_file_copy_complete)
timer.start(
interval=10,
timeout=self.configuration.netapp_migrate_volume_timeout
).wait()
except Exception as e:
with excutils.save_and_reraise_exception() as ctxt:
if cancel_on_error:
try:
self._cancel_file_copy(
job_uuid, volume, dest_ontap_volume,
dest_backend_name=dest_backend_name)
except na_utils.NetAppDriverException as ex:
LOG.error("Failed to cancel file copy operation. %s",
ex)
if isinstance(e, loopingcall.LoopingCallTimeOut):
ctxt.reraise = False
msg = (_('Timeout waiting for volume %s to complete '
'migration.'))
raise na_utils.NetAppDriverTimeout(msg % volume.id)
def _finish_volume_migration(self, src_volume, dest_pool):
"""Finish volume migration to another ONTAP volume."""
# The source volume can be safely deleted after a successful migration.
self.delete_volume(src_volume)
# NFS driver requires the provider_location to be updated with the new
# destination.
updates = {'provider_location': dest_pool}
return updates
def _migrate_volume_to_vserver(self, volume, src_pool, src_vserver,
dest_pool, dest_vserver, dest_backend_name):
"""Migrate volume to another vserver within the same cluster."""
LOG.info('Migrating volume %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s to '
'%(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.id, 'src_vserver': src_vserver,
'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_pool})
vserver_peer_application = 'file_copy'
self.create_vserver_peer(src_vserver, self.backend_name, dest_vserver,
[vserver_peer_application])
src_ontap_volume_name = src_pool.split(':/')[1]
dest_ontap_volume_name = dest_pool.split(':/')[1]
self._copy_file(volume, src_ontap_volume_name, src_vserver,
dest_ontap_volume_name, dest_vserver,
dest_backend_name=dest_backend_name,
cancel_on_error=True)
updates = self._finish_volume_migration(volume, dest_pool)
LOG.info('Successfully migrated volume %(vol)s from '
'%(src_vserver)s:%(src_ontap_vol)s '
'to %(dest_vserver)s:%(dest_ontap_vol)s.',
{'vol': volume.id, 'src_vserver': src_vserver,
'src_ontap_vol': src_pool, 'dest_vserver': dest_vserver,
'dest_ontap_vol': dest_pool})
return updates
def _migrate_volume_to_pool(self, volume, src_pool, dest_pool, vserver,
dest_backend_name):
"""Migrate volume to another Cinder Pool within the same vserver."""
LOG.info('Migrating volume %(vol)s from pool %(src)s to '
'%(dest)s within vserver %(vserver)s.',
{'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
'vserver': vserver})
src_ontap_volume_name = src_pool.split(':/')[1]
dest_ontap_volume_name = dest_pool.split(':/')[1]
self._copy_file(volume, src_ontap_volume_name, vserver,
dest_ontap_volume_name, vserver,
dest_backend_name=dest_backend_name,
cancel_on_error=True)
updates = self._finish_volume_migration(volume, dest_pool)
LOG.info('Successfully migrated volume %(vol)s from pool %(src)s '
'to %(dest)s within vserver %(vserver)s.',
{'vol': volume.id, 'src': src_pool, 'dest': dest_pool,
'vserver': vserver})
return updates
def migrate_volume(self, context, volume, host):
"""Migrate Cinder volume to the specified pool or vserver."""
# NOTE(sfernand): The NetApp NFS driver relies only on copy
# operations for storage assisted migration, which are always
# disruptive as they require the destination volume to be
# attached to the Nova instance as a new block device.
if volume.status != fields.VolumeStatus.AVAILABLE:
LOG.info("Storage assisted migration requires volume to be in "
"available status. Falling back to host assisted "
"migration.")
return False, {}
return self.migrate_volume_ontap_assisted(
volume, host, self.backend_name, self.configuration.netapp_vserver)
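For reference, a short sketch (addresses and export names hypothetical) of the NFS pool strings assumed by the `split(':/')` calls above; a pool is the share's `host:/export` location, so the ONTAP volume name is the part after `:/`:

# Hypothetical NFS pool strings in 'host:/export' form.
src_pool = '192.168.0.50:/vol_cinder_src'
dest_pool = '192.168.0.50:/vol_cinder_dest'

src_ontap_volume_name = src_pool.split(':/')[1]    # 'vol_cinder_src'
dest_ontap_volume_name = dest_pool.split(':/')[1]  # 'vol_cinder_dest'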

View File

@ -607,6 +607,36 @@ class DataMotionMixin(object):
# unreachable
pass
def create_vserver_peer(self, src_vserver, src_backend_name, dest_vserver,
peer_applications):
"""Create a vserver peer relationship"""
src_client = config_utils.get_client_for_backend(
src_backend_name, vserver_name=src_vserver)
vserver_peers = src_client.get_vserver_peers(src_vserver, dest_vserver)
if not vserver_peers:
src_client.create_vserver_peer(
src_vserver, dest_vserver,
vserver_peer_application=peer_applications)
LOG.debug("Vserver peer relationship created between %(src)s "
"and %(dest)s. Peering application set to %(app)s.",
{'src': src_vserver, 'dest': dest_vserver,
'app': peer_applications})
return None
for vserver_peer in vserver_peers:
if all(app in vserver_peer['applications'] for app in
peer_applications):
LOG.debug("Found vserver peer relationship between %s and %s.",
src_vserver, dest_vserver)
return None
msg = _("Vserver peer relationship found between %(src)s and %(dest)s "
"but peering application %(app)s isn't defined.")
raise na_utils.NetAppDriverException(msg % {'src': src_vserver,
'dest': dest_vserver,
'app': peer_applications})
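To make the check above concrete, here is a hedged example (values hypothetical) of one entry returned by `get_vserver_peers()`, limited to the keys visible in the `client_cmode` changes above:

# One element of the get_vserver_peers() result; create_vserver_peer()
# above verifies every requested application appears in 'applications'.
vserver_peer = {
    'peer-state': 'peered',
    'peer-cluster': 'fake_cluster',
    'applications': ['snapmirror', 'lun_copy'],
}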
def _choose_failover_target(self, backend_name, flexvols,
replication_targets):
target_lag_times = []
@ -740,3 +770,75 @@ class DataMotionMixin(object):
def _get_replication_volume_online_timeout(self):
return self.configuration.netapp_replication_volume_online_timeout
def migrate_volume_ontap_assisted(self, volume, host, src_backend_name,
src_vserver):
"""Migrate Cinder volume using ONTAP capabilities"""
_, src_pool = volume.host.split('#')
dest_backend, dest_pool = host["host"].split('#')
_, dest_backend_name = dest_backend.split('@')
# Check if migration occurs in the same backend. If so, a migration
# between Cinder pools in the same vserver will be performed.
if src_backend_name == dest_backend_name:
# Skip the operation if the source and destination pools are the
# same.
if src_pool == dest_pool:
LOG.info('Skipping volume migration as source and destination '
'are the same.')
return True, {}
updates = self._migrate_volume_to_pool(
volume, src_pool, dest_pool, src_vserver, dest_backend_name)
else:
if not self.using_cluster_credentials:
LOG.info('Storage assisted volume migration across backends '
'requires ONTAP cluster-wide credentials. Falling '
'back to host assisted migration.')
return False, {}
dest_backend_config = config_utils.get_backend_configuration(
dest_backend_name)
dest_vserver = dest_backend_config.netapp_vserver
dest_client = config_utils.get_client_for_backend(
dest_backend_name)
src_client = config_utils.get_client_for_backend(
src_backend_name)
# If the source and destination backends do not point to the
# same cluster, a host copy strategy is required. Otherwise, an
# intra-cluster operation can complete the migration.
src_cluster_name = src_client.get_cluster_name()
dest_cluster_name = dest_client.get_cluster_name()
if src_cluster_name != dest_cluster_name:
LOG.info('Driver only supports storage assisted migration '
'between pools within the same cluster. Falling back '
'to host assisted migration.')
return False, {}
# If the source and destination vservers are the same, simply
# move the Cinder volume from one pool to the other. Otherwise,
# an intra-cluster Vserver peer relationship followed by a
# volume copy operation is required. Both operations copy data
# between ONTAP volumes and, unlike volume clones, do not
# finish in constant time.
if src_vserver == dest_vserver:
# Skip the operation if the source and destination
# pools are the same.
if src_pool == dest_pool:
LOG.info('Skipping volume migration as source and '
'destination are the same.')
return True, {}
updates = self._migrate_volume_to_pool(
volume, src_pool, dest_pool, src_vserver,
dest_backend_name)
else:
updates = self._migrate_volume_to_vserver(
volume, src_pool, src_vserver, dest_pool,
dest_backend_config.netapp_vserver,
dest_backend_name)
LOG.info('Successfully migrated volume %s to host %s.',
volume.id, host['host'])
return True, updates
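A short sketch (host values hypothetical) of the `host@backend#pool` string handling at the top of `migrate_volume_ontap_assisted`:

# Cinder encodes hosts as 'host@backend#pool'.
volume_host = 'controller-1@ontap1#pool_a'           # volume.host
dest_host = {'host': 'controller-1@ontap2#pool_b'}   # `host` argument

_, src_pool = volume_host.split('#')                 # 'pool_a'
dest_backend, dest_pool = dest_host['host'].split('#')
_, dest_backend_name = dest_backend.split('@')       # 'ontap2'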

View File

@ -56,6 +56,7 @@ def get_backend_configuration(backend_name):
config.append_config_values(na_opts.netapp_san_opts)
config.append_config_values(na_opts.netapp_replication_opts)
config.append_config_values(na_opts.netapp_support_opts)
config.append_config_values(na_opts.netapp_migration_opts)
return config

View File

@ -182,6 +182,14 @@ netapp_support_opts = [
'all APIs will be traced.')),
]
netapp_migration_opts = [
cfg.IntOpt('netapp_migrate_volume_timeout',
default=3600,
min=30,
help='Sets time in seconds to wait for storage assisted volume '
'migration to complete.'),
]
CONF = cfg.CONF
CONF.register_opts(netapp_proxy_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_connection_opts, group=conf.SHARED_CONF_GROUP)
@ -194,3 +202,4 @@ CONF.register_opts(netapp_nfs_extra_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_san_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_replication_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_support_opts, group=conf.SHARED_CONF_GROUP)
CONF.register_opts(netapp_migration_opts, group=conf.SHARED_CONF_GROUP)
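A hedged cinder.conf sketch (stanza name and values hypothetical, apart from the new option registered above) showing where `netapp_migrate_volume_timeout` is set; 3600 seconds is the default, so the line is only needed to override it:

[ontap-iscsi]
volume_driver = cinder.volume.drivers.netapp.common.NetAppDriver
netapp_storage_family = ontap_cluster
netapp_storage_protocol = iscsi
netapp_vserver = fake_vserver
netapp_migrate_volume_timeout = 7200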

View File

@ -93,6 +93,10 @@ class GeometryHasChangedOnDestination(NetAppDriverException):
message = _("Geometry has changed on destination volume.")
class NetAppDriverTimeout(NetAppDriverException):
message = _("Timeout in NetApp Cinder Driver.")
def validate_instantiation(**kwargs):
"""Checks if a driver is instantiated other than by the unified driver.

View File

@ -760,7 +760,7 @@ driver.linbit_linstor=missing
driver.lvm=missing
driver.macrosan=complete
driver.nec=complete
driver.netapp_ontap=missing
driver.netapp_ontap=complete
driver.netapp_solidfire=complete
driver.nexenta=missing
driver.nfs=missing

View File

@ -0,0 +1,5 @@
---
features:
- |
NetApp ONTAP: Added support for storage assisted migration within the
same ONTAP cluster (iSCSI/FC/NFS).
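For completeness, a hedged usage sketch (volume ID and host strings hypothetical): migration is requested through the regular Cinder migrate API, and the driver picks a LUN/file move, an intra-cluster copy, or a fallback to host assisted migration as described above.

# Same vserver and backend/stanza: non-disruptive LUN move (iSCSI/FC).
$ cinder migrate <volume-id> controller-1@ontap-iscsi#pool_b

# Different backend or vserver: volume must be in 'available' status.
$ cinder migrate <volume-id> controller-1@ontap-iscsi-2#pool_a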