cephfs_native: enhance update_access()

In recovery/maintenance mode, the driver simply applied all the
rules it was expected to sync; it could not, for example, remove
unwanted access rules that already existed on the backend. This was
because CephFSVolumeClient, the interface to the Ceph backend, had
no way to fetch the existing access rules from the backend.

Since the following commits in Ceph,
master branch: https://github.com/ceph/ceph/commits/1c1d65a45f4574
Jewel branch:  https://github.com/ceph/ceph/commits/e1eb8afea9f202
CephFSVolumeClient is versioned and can list the existing access
rules. Use this to implement update_access() more faithfully while
staying backwards compatible with earlier versions of
CephFSVolumeClient.
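
Version detection is what makes the backwards compatibility
possible. A minimal sketch of the pattern used in the diff below;
the names volume_client, share_path and list_existing_auth_ids are
illustrative stand-ins, not names from this patch:

    # Older, unversioned CephFSVolumeClients have no 'version'
    # attribute and no get_authorized_ids(), so both are
    # feature-detected via getattr().
    def list_existing_auth_ids(volume_client, share_path):
        if getattr(volume_client, 'version', None):
            # get_authorized_ids() yields (auth_id, access_level)
            # tuples, e.g. ('eve', 'rw'), per the mocked return
            # value in the tests below.
            return [auth_id for auth_id, _ in
                    volume_client.get_authorized_ids(share_path)]
        return None  # unversioned client: rules cannot be listed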

Also, when authorizing access for a ceph auth ID with
CephFSVolumeClient's authorize(), pass the share's project ID as an
additional argument. Since the Ceph commits mentioned above, this
argument is required to retrieve the access key of the ceph auth
ID.
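
As a hedged illustration of the changed call, assuming a connected
CephFSVolumeClient instance volume_client, a share path string
share_path, and a Manila share dict share:

    # Authorize 'alice' for rw access; since the Ceph commits
    # above, tenant_id is needed for the backend to return the key.
    auth_result = volume_client.authorize(
        share_path, 'alice', readonly=False,
        tenant_id=share['project_id'])
    auth_key = auth_result['auth_key']  # cephx secret for 'alice'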

The driver's update_access() now returns access_keys, a dictionary
mapping access rule IDs to access keys. This will be useful once
Manila can store the access keys in its DB and expose them to the
user.
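
For illustration, the returned mapping has this shape, using the
rule ID and key that the unit tests below assert on:

    # One entry per access rule: {access rule ID: cephx access key}
    access_keys = {'accessid1': 'abc123'}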

Partially Implements: bp cephfs-native-driver-enhancements
Change-Id: I49782d62c0a8382d985b9b08612cf5bc394837ae
Ramana Raja 2016-07-25 21:26:02 +05:30
parent 0f596c55df
commit 08ffd6bdb5
3 changed files with 90 additions and 30 deletions


@@ -231,7 +231,8 @@ class CephFSNativeDriver(driver.ShareDriver,):
         else:
             readonly = access['access_level'] == constants.ACCESS_LEVEL_RO
             auth_result = self.volume_client.authorize(
-                self._share_path(share), ceph_auth_id, readonly=readonly)
+                self._share_path(share), ceph_auth_id, readonly=readonly,
+                tenant_id=share['project_id'])
 
         return auth_result['auth_key']
@@ -250,25 +251,45 @@ class CephFSNativeDriver(driver.ShareDriver,):
     def update_access(self, context, share, access_rules, add_rules,
                       delete_rules, share_server=None):
-        # The interface to Ceph just provides add/remove methods, since it
-        # was created at start of mitaka cycle when there was no requirement
-        # to be able to list access rules or set them en masse. Therefore
-        # we implement update_access as best we can. In future ceph's
-        # interface should be extended to enable a full implementation
-        # of update_access.
+        access_keys = {}
+
+        if not (add_rules or delete_rules):  # recovery/maintenance mode
+            add_rules = access_rules
+
+            existing_auths = None
+
+            # The unversioned volume client cannot fetch the list of auth
+            # IDs that have share access from the Ceph backend.
+            if getattr(self.volume_client, 'version', None):
+                existing_auths = self.volume_client.get_authorized_ids(
+                    self._share_path(share))
+
+            if existing_auths:
+                existing_auth_ids = set(
+                    [auth[0] for auth in existing_auths])
+                want_auth_ids = set(
+                    [rule['access_to'] for rule in add_rules])
+                delete_auth_ids = existing_auth_ids.difference(
+                    want_auth_ids)
+                for delete_auth_id in delete_auth_ids:
+                    delete_rules.append(
+                        {
+                            'access_to': delete_auth_id,
+                            'access_type': CEPHX_ACCESS_TYPE,
+                        })
 
+        # During recovery mode, re-authorize share access for auth IDs that
+        # were already granted access by the backend. Do this to fetch their
+        # access keys and ensure that after recovery, manila and the Ceph
+        # backend are in sync.
         for rule in add_rules:
-            self._allow_access(context, share, rule)
+            access_key = self._allow_access(context, share, rule)
+            access_keys.update({rule['id']: access_key})
 
         for rule in delete_rules:
             self._deny_access(context, share, rule)
 
-        # This is where we would list all permitted clients and remove
-        # those that are not in `access_rules` if the ceph interface
-        # enabled it.
-        if not (add_rules or delete_rules):
-            for rule in access_rules:
-                self._allow_access(context, share, rule)
+        return access_keys
 
     def delete_share(self, context, share, share_server=None):
         extra_specs = share_types.get_extra_specs_from_share(share)
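
To make the recovery-mode reconciliation above concrete, a small
worked example with illustrative data; the auth IDs are borrowed
from the unit tests below:

    # Backend knows 'alice' and 'eve'; manila's rules only want
    # 'alice', so 'eve' is scheduled for deauthorization.
    existing_auth_ids = {'alice', 'eve'}  # from get_authorized_ids()
    want_auth_ids = {'alice'}             # from access_rules
    delete_auth_ids = existing_auth_ids.difference(want_auth_ids)
    assert delete_auth_ids == {'eve'}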


@@ -58,12 +58,14 @@ class MockVolumeClientModule(object):
             "delete_volume", "purge_volume",
             "deauthorize", "evict", "set_max_bytes",
             "destroy_snapshot_group", "create_snapshot_group",
-            "disconnect"
+            "disconnect",
+            "get_authorized_ids"
         ])
         self.create_volume = mock.Mock(return_value={
             "mount_path": "/foo/bar"
         })
         self.get_mon_addrs = mock.Mock(return_value=["1.2.3.4", "5.6.7.8"])
+        self.get_authorized_ids = mock.Mock(
+            return_value=[('eve', 'rw')])
         self.authorize = mock.Mock(return_value={"auth_key": "abc123"})
         self.get_used_bytes = mock.Mock(return_value=self.mock_used_bytes)
         self.rados = mock.Mock()
@@ -176,6 +178,7 @@ class CephFSNativeDriverTestCase(test.TestCase):
             self._context, self._share, rule)
 
         self.assertEqual("abc123", auth_key)
+
         if not volume_client_version:
             self._driver._volume_client.authorize.assert_called_once_with(
                 self._driver._share_path(self._share),
@@ -184,7 +187,8 @@
             self._driver._volume_client.authorize.assert_called_once_with(
                 self._driver._share_path(self._share),
                 "alice",
-                readonly=False)
+                readonly=False,
+                tenant_id=self._share['project_id'])
 
     @ddt.data(None, 1)
     def test_allow_access_ro(self, volume_client_version):
@@ -207,7 +211,9 @@
             self._driver._volume_client.authorize.assert_called_once_with(
                 self._driver._share_path(self._share),
                 "alice",
-                readonly=True)
+                readonly=True,
+                tenant_id=self._share['project_id'],
+            )
 
     def test_allow_access_wrong_type(self):
         self.assertRaises(exception.InvalidShareAccess,
@@ -243,43 +249,69 @@
     def test_update_access_add_rm(self):
         alice = {
             'id': 'accessid1',
             'access_level': 'rw',
             'access_type': 'cephx',
             'access_to': 'alice'
         }
         bob = {
             'id': 'accessid2',
             'access_level': 'rw',
             'access_type': 'cephx',
             'access_to': 'bob'
         }
 
-        self._driver.update_access(self._context, self._share,
-                                   access_rules=[alice],
-                                   add_rules=[alice],
-                                   delete_rules=[bob])
+        access_keys = self._driver.update_access(self._context, self._share,
+                                                 access_rules=[alice],
+                                                 add_rules=[alice],
+                                                 delete_rules=[bob])
 
+        self.assertEqual({'accessid1': 'abc123'}, access_keys)
         self._driver._volume_client.authorize.assert_called_once_with(
             self._driver._share_path(self._share),
             "alice",
-            readonly=False)
+            readonly=False,
+            tenant_id=self._share['project_id'])
         self._driver._volume_client.deauthorize.assert_called_once_with(
             self._driver._share_path(self._share),
             "bob")
 
-    def test_update_access_all(self):
+    @ddt.data(None, 1)
+    def test_update_access_all(self, volume_client_version):
         alice = {
             'id': 'accessid1',
             'access_level': 'rw',
             'access_type': 'cephx',
             'access_to': 'alice'
         }
+        self._driver.volume_client.version = volume_client_version
 
-        self._driver.update_access(self._context, self._share,
-                                   access_rules=[alice], add_rules=[],
-                                   delete_rules=[])
+        access_keys = self._driver.update_access(self._context, self._share,
+                                                 access_rules=[alice],
+                                                 add_rules=[],
+                                                 delete_rules=[])
 
-        self._driver._volume_client.authorize.assert_called_once_with(
-            self._driver._share_path(self._share),
-            "alice",
-            readonly=False)
+        self.assertEqual({'accessid1': 'abc123'}, access_keys)
+        if volume_client_version:
+            (self._driver._volume_client.get_authorized_ids.
+             assert_called_once_with(self._driver._share_path(self._share)))
+            self._driver._volume_client.authorize.assert_called_once_with(
+                self._driver._share_path(self._share),
+                "alice",
+                readonly=False,
+                tenant_id=self._share['project_id']
+            )
+            self._driver._volume_client.deauthorize.assert_called_once_with(
+                self._driver._share_path(self._share),
+                "eve",
+            )
+        else:
+            self.assertFalse(
+                self._driver._volume_client.get_authorized_ids.called)
+            self._driver._volume_client.authorize.assert_called_once_with(
+                self._driver._share_path(self._share),
+                "alice",
+                readonly=False,
+            )
 
     def test_extend_share(self):
         new_size_gb = self._share['size'] * 2


@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Enhanced ``cephfs_native`` driver's update_access() to:
+
+    - remove undesired rules existing in the backend during recovery mode.
+    - return ``access_keys`` of ceph auth IDs that are allowed access.