glusterfs volume layout: take care of deletion of DOA shares
In delete_share, if the private_storage entry of a share is missing, recognize this as an indication of a botched creation and return immediately so that the runtime can go on with evicting the dangling share entry. Change-Id: I76dabe0acc0b67ea2b03e77eb0743772ef25579d Closes-bug: #1554290
This commit is contained in:
parent
cd72947258
commit
ec5d9ca466
|
@ -186,8 +186,10 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
|||
|
||||
def _share_manager(self, share):
|
||||
"""Return GlusterManager object representing share's backend."""
|
||||
return self._glustermanager(self.private_storage.get(
|
||||
share['id'], 'volume'))
|
||||
gluster_address = self.private_storage.get(share['id'], 'volume')
|
||||
if gluster_address is None:
|
||||
return
|
||||
return self._glustermanager(gluster_address)
|
||||
|
||||
def _fetch_gluster_volumes(self, filter_used=True):
|
||||
"""Do a 'gluster volume list | grep <volume pattern>'.
|
||||
|
@ -394,9 +396,9 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
|||
gmgr = self._glustermanager(vol)
|
||||
export = self.driver._setup_via_manager(
|
||||
{'share': share, 'manager': gmgr})
|
||||
self.private_storage.update(share['id'], {'volume': vol})
|
||||
|
||||
gmgr.set_vol_option(USER_MANILA_SHARE, share['id'])
|
||||
self.private_storage.update(share['id'], {'volume': vol})
|
||||
|
||||
# TODO(deepakcs): Enable quota and set it to the share size.
|
||||
|
||||
|
@ -413,6 +415,15 @@ class GlusterfsVolumeMappedLayout(layout.GlusterfsShareLayoutBase):
|
|||
volume back in the available list.
|
||||
"""
|
||||
gmgr = self._share_manager(share)
|
||||
if not gmgr:
|
||||
# Share does not have a record in private storage.
|
||||
# It means create_share{,_from_snapshot} did not
|
||||
# succeed(*). In that case we should not obstruct
|
||||
# share deletion, so we just return doing nothing.
|
||||
#
|
||||
# (*) or we have a database corruption but then
|
||||
# basically does not matter what we do here
|
||||
return
|
||||
clone_of = gmgr.get_vol_option(USER_CLONED_FROM) or ''
|
||||
try:
|
||||
if UUID_RE.search(clone_of):
|
||||
|
|
|
@ -346,6 +346,16 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
|
|||
self._layout._glustermanager.assert_called_once_with('host1:/gv1')
|
||||
self.assertEqual(self.gmgr1, ret)
|
||||
|
||||
def test_share_manager_no_privdata(self):
    """_share_manager returns None when private storage has no record."""
    self.mock_object(self._layout.private_storage,
                     'get', mock.Mock(return_value=None))

    ret = self._layout._share_manager(self.share1)

    self._layout.private_storage.get.assert_called_once_with(
        self.share1['id'], 'volume')
    # assertIsNone is the idiomatic identity check (OpenStack hacking
    # rule H203 forbids assertEqual(None, ...)) and yields a clearer
    # failure message.
    self.assertIsNone(ret)
|
||||
|
||||
def test_ensure_share(self):
|
||||
share = self.share1
|
||||
gmgr1 = common.GlusterManager(self.glusterfs_target1, self._execute,
|
||||
|
@ -635,6 +645,14 @@ class GlusterfsVolumeMappedLayoutTestCase(test.TestCase):
|
|||
self._layout._wipe_gluster_vol.assert_called_once_with(gmgr1)
|
||||
self.assertFalse(self._layout._push_gluster_vol.called)
|
||||
|
||||
def test_delete_share_missing_record(self):
    """delete_share is a silent no-op when the share has no backend.

    With _share_manager returning None (no private storage record),
    delete_share must return without raising.
    """
    self.mock_object(
        self._layout, '_share_manager', mock.Mock(return_value=None))

    self._layout.delete_share(self._context, self.share1)

    self._layout._share_manager.assert_called_once_with(self.share1)
|
||||
|
||||
def test_create_snapshot(self):
|
||||
self._layout.gluster_nosnap_vols_dict = {}
|
||||
self._layout.glusterfs_versions = {self.glusterfs_server1: ('3', '6')}
|
||||
|
|
Loading…
Reference in New Issue