Fix volume migration failure within the same Ceph RBD pool
When the source and destination back ends use the same Ceph RBD pool, volume migration only needs to update the volume's host value to the destination host. In this case, volumes in 'available' or 'in-use' status can be migrated.

Change-Id: I01039b7651a354761d034a9118b55b96cf32445e
Closes-Bug: #1871524
parent edd5bcace2
commit ea5a9c35ea
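Before the diff, a minimal sketch of the decision flow this change gives the RBD driver's migrate_volume() (hypothetical, simplified Python; the name migrate_volume_sketch is mine, and the real logic lives in cinder/volume/drivers/rbd.py as shown below):

# Hypothetical, simplified sketch of Cinder's backend-assisted migration
# contract: a driver returns (True, model_update) when it migrated the
# volume itself, or (False, None) to fall back to generic migration.
MIGRATABLE_STATUSES = ('available', 'retyping', 'maintenance', 'in-use')

def migrate_volume_sketch(status, src_fsid, dst_fsid, src_pool, dst_pool):
    refuse_to_migrate = (False, None)
    if status not in MIGRATABLE_STATUSES:
        return refuse_to_migrate        # let generic migration handle it
    if src_fsid != dst_fsid:            # different Ceph clusters
        return refuse_to_migrate
    if src_pool == dst_pool:
        # Same pool: nothing moves on the Ceph side; Cinder only updates
        # the volume's host value, so 'in-use' volumes are fine too.
        return (True, None)
    if status == 'in-use':
        # Copying an attached image between pools is not done here.
        return refuse_to_migrate
    # Different pool, detached volume: copy the image, delete the source.
    return (True, None)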
@@ -2311,7 +2311,7 @@ class RBDTestCase(test.TestCase):
         self.assertEqual(3.00, total_provision)
 
     def test_migrate_volume_bad_volume_status(self):
-        self.volume_a.status = 'in-use'
+        self.volume_a.status = 'backuping'
         ret = self.driver.migrate_volume(context, self.volume_a, None)
         self.assertEqual((False, None), ret)
 
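Note: the bad-status test switches from 'in-use' to 'backuping' because, after this change, 'in-use' becomes an accepted status for backend-assisted migration; the test still needs a status that is refused.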
@@ -2373,8 +2373,7 @@ class RBDTestCase(test.TestCase):
 
     @mock.patch('os_brick.initiator.linuxrbd.rbd')
     @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
-    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
-    def test_migrate_volume(self, mock_proxy, mock_client, mock_rbd):
+    def test_migrate_volume_same_pool(self, mock_client, mock_rbd):
         host = {
             'capabilities': {
                 'storage_protocol': 'ceph',
@@ -2382,6 +2381,39 @@ class RBDTestCase(test.TestCase):
         mock_client().__enter__().client.get_fsid.return_value = 'abc'
 
         with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
             mock_get_fsid.return_value = 'abc'
             ret = self.driver.migrate_volume(context, self.volume_a, host)
         self.assertEqual((True, None), ret)
+
+    @mock.patch('os_brick.initiator.linuxrbd.rbd')
+    @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
+    def test_migrate_volume_inuse_different_pool(self, mock_client, mock_rbd):
+        self.volume_a.status = 'in-use'
+        host = {
+            'capabilities': {
+                'storage_protocol': 'ceph',
+                'location_info': 'nondefault:None:abc:None:rbd2'}}
+
+        mock_client().__enter__().client.get_fsid.return_value = 'abc'
+
+        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid:
+            mock_get_fsid.return_value = 'abc'
+            ret = self.driver.migrate_volume(context, self.volume_a, host)
+        self.assertEqual((False, None), ret)
+
+    @mock.patch('os_brick.initiator.linuxrbd.rbd')
+    @mock.patch('os_brick.initiator.linuxrbd.RBDClient')
+    @mock.patch('cinder.volume.drivers.rbd.RBDVolumeProxy')
+    def test_migrate_volume_different_pool(self, mock_proxy, mock_client,
+                                           mock_rbd):
+        host = {
+            'capabilities': {
+                'storage_protocol': 'ceph',
+                'location_info': 'nondefault:None:abc:None:rbd2'}}
+
+        mock_client().__enter__().client.get_fsid.return_value = 'abc'
+
+        with mock.patch.object(self.driver, '_get_fsid') as mock_get_fsid, \
+                mock.patch.object(self.driver, 'delete_volume') as mock_delete:
+            mock_get_fsid.return_value = 'abc'
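To run just the new cases in a cinder checkout, the usual OpenStack tooling applies (test path assumed from the class shown above):

    tox -e py3 -- cinder.tests.unit.volume.drivers.test_rbd.RBDTestCase.test_migrate_volume_same_pool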
@@ -1823,9 +1823,11 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
 
         refuse_to_migrate = (False, None)
 
-        if volume.status not in ('available', 'retyping', 'maintenance'):
-            LOG.debug('Only available volumes can be migrated using backend '
-                      'assisted migration. Falling back to generic migration.')
+        if volume.status not in ('available', 'retyping', 'maintenance',
+                                 'in-use'):
+            LOG.debug('Only available or in-use volumes can be migrated '
+                      'using backend assisted migration. Falling back to '
+                      'generic migration.')
             return refuse_to_migrate
 
         if (host['capabilities']['storage_protocol'] != 'ceph'):
@@ -1864,6 +1866,16 @@ class RBDDriver(driver.CloneableImageVD, driver.MigrateVD,
                       'Falling back to generic migration.')
             return refuse_to_migrate
 
+        if rbd_pool == self.configuration.rbd_pool:
+            LOG.debug('Migration in the same pool, just need to update '
+                      "the volume's host value to the destination host.")
+            return (True, None)
+
+        if volume.status == 'in-use':
+            LOG.debug('Migrating an in-use volume between different pools. '
+                      'Falling back to generic migration.')
+            return refuse_to_migrate
+
         with RBDVolumeProxy(self, volume.name, read_only=True) as source:
             try:
                 source.copy(target.ioctx, volume.name)
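The rbd_pool compared above comes from the destination's location_info capability string. A small parsing sketch, with the field order inferred from the test fixture 'nondefault:None:abc:None:rbd2' (an assumption on my part, not the authoritative format):

# Assumed field order: cluster_name:ceph_conf:fsid:user:pool
def parse_location_info(info):
    cluster_name, ceph_conf, fsid, user, pool = info.split(':')
    return {'cluster_name': cluster_name, 'ceph_conf': ceph_conf,
            'fsid': fsid, 'user': user, 'pool': pool}

dst = parse_location_info('nondefault:None:abc:None:rbd2')
assert dst['fsid'] == 'abc' and dst['pool'] == 'rbd2'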
releasenotes/notes/bug-1871524-5f6df9a61bf6b775.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed a failure when migrating a volume within the same Ceph RBD pool.
+    `Bug 1871524 <https://bugs.launchpad.net/cinder/+bug/1871524>`__.
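Release note files like this one are normally scaffolded with reno, which generates the random suffix in the filename:

    reno new bug-1871524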