Browse Source

Sync in charms.ceph

This brings in the new broker change to restrict
key access by groups

Change-Id: I19ad0142b4227ba555a0794e8b938372d9fdb84c
Partial-Bug: 1424771
changes/89/432289/1
Chris MacNaughton 2 years ago
parent
commit
3dfeff7a19
2 changed files with 244 additions and 21 deletions
  1. 66
    20
      lib/ceph/__init__.py
  2. 178
    1
      lib/ceph/ceph_broker.py

+ 66
- 20
lib/ceph/__init__.py View File

@@ -11,6 +11,7 @@
11 11
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 12
 # See the License for the specific language governing permissions and
13 13
 # limitations under the License.
14
+from _ctypes import POINTER, byref
14 15
 import ctypes
15 16
 import collections
16 17
 import json
@@ -309,22 +310,52 @@ def set_hdd_read_ahead(dev_name, read_ahead_sectors=256):
309 310
 
310 311
 def get_block_uuid(block_dev):
311 312
     """
312
-    This queries blkid to get the uuid for a block device.
313
+    This queries blkid to get the uuid for a block device. Note: This function
314
+    needs to be called with root priv.  It will raise an error otherwise.
313 315
     :param block_dev: Name of the block device to query.
314
-    :return: The UUID of the device or None on Error.
316
+    :return: The UUID of the device or None on Error. Raises OSError
315 317
     """
316 318
     try:
317
-        block_info = subprocess.check_output(
318
-            ['blkid', '-o', 'export', block_dev])
319
-        for tag in block_info.split('\n'):
320
-            parts = tag.split('=')
321
-            if parts[0] == 'UUID':
322
-                return parts[1]
323
-        return None
324
-    except subprocess.CalledProcessError as err:
325
-        log('get_block_uuid failed with error: {}'.format(err.output),
319
+        blkid = ctypes.cdll.LoadLibrary("libblkid.so")
320
+        # Header signature
321
+        # extern int blkid_probe_lookup_value(blkid_probe pr, const char *name,
322
+        #                                     const char **data, size_t *len);
323
+        blkid.blkid_new_probe_from_filename.argtypes = [ctypes.c_char_p]
324
+        blkid.blkid_probe_lookup_value.argtypes = [ctypes.c_void_p,
325
+                                                   ctypes.c_char_p,
326
+                                                   POINTER(ctypes.c_char_p),
327
+                                                   POINTER(ctypes.c_ulong)]
328
+    except OSError as err:
329
+        log('get_block_uuid loading libblkid.so failed with error: {}'.format(
330
+            os.strerror(err.errno)),
326 331
             level=ERROR)
332
+        raise err
333
+    if not os.path.exists(block_dev):
327 334
         return None
335
+    probe = blkid.blkid_new_probe_from_filename(ctypes.c_char_p(block_dev))
336
+    if probe < 0:
337
+        log('get_block_uuid new_probe_from_filename failed: {}'.format(
338
+            os.strerror(probe)),
339
+            level=ERROR)
340
+        raise OSError(probe, os.strerror(probe))
341
+    result = blkid.blkid_do_probe(probe)
342
+    if result != 0:
343
+        log('get_block_uuid do_probe failed with error: {}'.format(
344
+            os.strerror(result)),
345
+            level=ERROR)
346
+        raise OSError(result, os.strerror(result))
347
+    uuid = ctypes.c_char_p()
348
+    result = blkid.blkid_probe_lookup_value(probe,
349
+                                            ctypes.c_char_p(
350
+                                                'UUID'.encode('ascii')),
351
+                                            byref(uuid), None)
352
+    if result < 0:
353
+        log('get_block_uuid lookup_value failed with error: {}'.format(
354
+            os.strerror(result)),
355
+            level=ERROR)
356
+        raise OSError(result, os.strerror(result))
357
+    blkid.blkid_free_probe(probe)
358
+    return ctypes.string_at(uuid).decode('ascii')
328 359
 
329 360
 
330 361
 def check_max_sectors(save_settings_dict,
@@ -390,6 +421,7 @@ def tune_dev(block_dev):
390 421
     if uuid is None:
391 422
         log('block device {} uuid is None.  Unable to save to '
392 423
             'hdparm.conf'.format(block_dev), level=DEBUG)
424
+        return
393 425
     save_settings_dict = {}
394 426
     log('Tuning device {}'.format(block_dev))
395 427
     status_set('maintenance', 'Tuning device {}'.format(block_dev))
@@ -1430,10 +1462,17 @@ def upgrade_monitor(new_version):
1430 1462
             service_stop('ceph-mon-all')
1431 1463
         apt_install(packages=PACKAGES, fatal=True)
1432 1464
 
1433
-        # Ensure the ownership of Ceph's directories is correct
1434
-        chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
1435
-               owner=ceph_user(),
1436
-               group=ceph_user())
1465
+        # Ensure the files and directories under /var/lib/ceph is chowned
1466
+        # properly as part of the move to the Jewel release, which moved the
1467
+        # ceph daemons to running as ceph:ceph instead of root:root.
1468
+        if new_version == 'jewel':
1469
+            # Ensure the ownership of Ceph's directories is correct
1470
+            owner = ceph_user()
1471
+            chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
1472
+                   owner=owner,
1473
+                   group=owner,
1474
+                   follow_links=True)
1475
+
1437 1476
         if systemd():
1438 1477
             for mon_id in get_local_mon_ids():
1439 1478
                 service_start('ceph-mon@{}'.format(mon_id))
@@ -1608,10 +1647,18 @@ def upgrade_osd(new_version):
1608 1647
             service_stop('ceph-osd-all')
1609 1648
         apt_install(packages=PACKAGES, fatal=True)
1610 1649
 
1611
-        # Ensure the ownership of Ceph's directories is correct
1612
-        chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
1613
-               owner=ceph_user(),
1614
-               group=ceph_user())
1650
+        # Ensure the files and directories under /var/lib/ceph is chowned
1651
+        # properly as part of the move to the Jewel release, which moved the
1652
+        # ceph daemons to running as ceph:ceph instead of root:root. Only do
1653
+        # it when necessary as this is an expensive operation to run.
1654
+        if new_version == 'jewel':
1655
+            owner = ceph_user()
1656
+            status_set('maintenance', 'Updating file ownership for OSDs')
1657
+            chownr(path=os.path.join(os.sep, "var", "lib", "ceph"),
1658
+                   owner=owner,
1659
+                   group=owner,
1660
+                   follow_links=True)
1661
+
1615 1662
         if systemd():
1616 1663
             for osd_id in get_local_osd_ids():
1617 1664
                 service_start('ceph-osd@{}'.format(osd_id))
@@ -1642,7 +1689,6 @@ def list_pools(service):
1642 1689
         log("rados lspools failed with error: {}".format(err.output))
1643 1690
         raise
1644 1691
 
1645
-
1646 1692
 # A dict of valid ceph upgrade paths.  Mapping is old -> new
1647 1693
 UPGRADE_PATHS = {
1648 1694
     'firefly': 'hammer',

+ 178
- 1
lib/ceph/ceph_broker.py View File

@@ -34,6 +34,8 @@ from charmhelpers.contrib.storage.linux.ceph import (
34 34
     delete_pool,
35 35
     erasure_profile_exists,
36 36
     get_osds,
37
+    monitor_key_get,
38
+    monitor_key_set,
37 39
     pool_exists,
38 40
     pool_set,
39 41
     remove_pool_snapshot,
@@ -49,7 +51,7 @@ from charmhelpers.contrib.storage.linux.ceph import (
49 51
 # This comes from http://docs.ceph.com/docs/master/rados/operations/pools/
50 52
 # This should do a decent job of preventing people from passing in bad values.
51 53
 # It will give a useful error message
52
-from subprocess import check_output, CalledProcessError
54
+from subprocess import check_call, check_output, CalledProcessError
53 55
 
54 56
 POOL_KEYS = {
55 57
     # "Ceph Key Name": [Python type, [Valid Range]]
@@ -157,11 +159,169 @@ def handle_create_erasure_profile(request, service):
157 159
                            data_chunks=k, coding_chunks=m, locality=l)
158 160
 
159 161
 
162
def handle_add_permissions_to_key(request, service):
    """Add pool-access permissions to a client's cephx key by group.

    Groups are defined by the key cephx.groups.(namespace-)?(name). This key
    will contain a dict serialized to JSON with data about the group,
    including pools and members.

    A group can optionally have a namespace defined that will be used to
    further restrict pool access.
    """
    service_name = request.get('name')
    group_name = request.get('group')
    group_namespace = request.get('group-namespace')
    if group_namespace:
        group_name = "{}-{}".format(group_namespace, group_name)
    group = get_group(group_name=group_name)
    service_obj = get_service_groups(service=service_name,
                                     namespace=group_namespace)
    # BUG FIX: the original called the format() builtin here, building the
    # message and silently discarding it; log it instead.
    log("Service object: {}".format(service_obj), level=DEBUG)
    permission = request.get('group-permission') or "rwx"
    if service_name not in group['services']:
        group['services'].append(service_name)
    save_group(group=group, group_name=group_name)
    if permission not in service_obj['group_names']:
        service_obj['group_names'][permission] = []
    if group_name not in service_obj['group_names'][permission]:
        service_obj['group_names'][permission].append(group_name)
    save_service(service=service_obj, service_name=service_name)
    # Re-attach the (namespaced) group so the permission update sees it;
    # save_service() clears the 'groups' member before persisting.
    service_obj['groups'][group_name] = group
    update_service_permissions(service_name, service_obj, group_namespace)
191
+
192
+
193
def update_service_permissions(service, service_obj=None, namespace=None):
    """Update the key permissions for the named client in Ceph.

    If no service object is supplied it is fetched from the monitor
    cluster first.
    """
    if not service_obj:
        service_obj = get_service_groups(service=service, namespace=namespace)
    caps = pool_permission_list_for_service(service_obj)
    cmd = ['ceph', 'auth', 'caps', 'client.{}'.format(service)]
    cmd.extend(caps)
    try:
        check_call(cmd)
    except CalledProcessError as e:
        log("Error updating key capabilities: {}".format(e))
203
+
204
+
205
def add_pool_to_group(pool, group, namespace=None):
    """Add a named pool to a named group and refresh member permissions."""
    group_name = "{}-{}".format(namespace, group) if namespace else group
    group_data = get_group(group_name=group_name)
    group_data["pools"].append(pool)
    save_group(group_data, group_name=group_name)
    # Every service in the group now needs its key caps regenerated.
    for member in group_data['services']:
        update_service_permissions(member, namespace=namespace)
215
+
216
+
217
def pool_permission_list_for_service(service):
    """Build the Ceph capability argument list for a given service.

    Returns ["mon", "allow r", "osd", "<osd caps>"] where the osd caps
    string grants each permission over every pool of its groups.
    """
    by_permission = {}
    for perm, names in service["group_names"].items():
        by_permission.setdefault(perm, []).extend(names)
    chunks = []
    for perm, names in by_permission.items():
        chunk = " allow {}".format(perm)
        for name in names:
            for pool in service['groups'][name]['pools']:
                chunk = "{} pool={}".format(chunk, pool)
        chunks.append(chunk)
    return ["mon", "allow r", "osd", "".join(chunks).strip()]
233
+
234
+
235
def get_service_groups(service, namespace=None):
    """Fetch a service object from the monitor cluster and populate it.

    Services are objects stored with some metadata; for a service named
    "nova" they look like:
    {
        group_names: {'rwx': ['images']},
        groups: {}
    }
    After populating the groups, it looks like:
    {
        group_names: {'rwx': ['images']},
        groups: {
            'images': {
                pools: ['glance'],
                services: ['nova']
            }
        }
    }
    """
    service_json = monitor_key_get(service='admin',
                                   key="cephx.services.{}".format(service))
    try:
        service = json.loads(service_json)
    except (TypeError, ValueError):
        # TypeError: key missing (None); ValueError: malformed JSON.
        service = None
    if not service:
        return {'group_names': {}, 'groups': {}}
    for permission, groups in service['group_names'].items():
        for group in groups:
            name = "{}-{}".format(namespace, group) if namespace else group
            service['groups'][group] = get_group(group_name=name)
    return service
272
+
273
+
274
def get_group(group_name):
    """Fetch a group object from the monitor cluster.

    A group is a structure to hold data about a named group, structured as:
    {
        pools: ['glance'],
        services: ['nova']
    }
    An empty skeleton is returned when the group does not exist yet or
    cannot be decoded.
    """
    raw = monitor_key_get(service='admin',
                          key=get_group_key(group_name=group_name))
    try:
        group = json.loads(raw)
    except (TypeError, ValueError):
        group = None
    if group:
        return group
    return {'pools': [], 'services': []}
296
+
297
+
298
def save_service(service_name, service):
    """Persist a service in the monitor cluster.

    NOTE: clears the 'groups' member in place before saving; groups are
    stored under their own keys and re-fetched on load.
    """
    service['groups'] = {}
    key = "cephx.services.{}".format(service_name)
    return monitor_key_set(service='admin', key=key,
                           value=json.dumps(service))
304
+
305
+
306
def save_group(group, group_name):
    """Persist a group in the monitor cluster."""
    return monitor_key_set(service='admin',
                           key=get_group_key(group_name=group_name),
                           value=json.dumps(group))
312
+
313
+
314
def get_group_key(group_name):
    """Build the monitor-cluster key under which a group is stored."""
    return 'cephx.groups.%s' % group_name
317
+
318
+
160 319
 def handle_erasure_pool(request, service):
161 320
     pool_name = request.get('name')
162 321
     erasure_profile = request.get('erasure-profile')
163 322
     quota = request.get('max-bytes')
164 323
     weight = request.get('weight')
324
+    group_name = request.get('group')
165 325
 
166 326
     if erasure_profile is None:
167 327
         erasure_profile = "default-canonical"
@@ -172,6 +332,13 @@ def handle_erasure_pool(request, service):
172 332
         log(msg, level=ERROR)
173 333
         return {'exit-code': 1, 'stderr': msg}
174 334
 
335
+    if group_name:
336
+        group_namespace = request.get('group-namespace')
337
+        # Add the pool to the group named "group_name"
338
+        add_pool_to_group(pool=pool_name,
339
+                          group=group_name,
340
+                          namespace=group_namespace)
341
+
175 342
     # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds
176 343
     if not erasure_profile_exists(service=service, name=erasure_profile):
177 344
         # TODO: Fail and tell them to create the profile or default
@@ -200,6 +367,7 @@ def handle_replicated_pool(request, service):
200 367
     replicas = request.get('replicas')
201 368
     quota = request.get('max-bytes')
202 369
     weight = request.get('weight')
370
+    group_name = request.get('group')
203 371
 
204 372
     # Optional params
205 373
     pg_num = request.get('pg_num')
@@ -215,6 +383,13 @@ def handle_replicated_pool(request, service):
215 383
         log(msg, level=ERROR)
216 384
         return {'exit-code': 1, 'stderr': msg}
217 385
 
386
+    if group_name:
387
+        group_namespace = request.get('group-namespace')
388
+        # Add the pool to the group named "group_name"
389
+        add_pool_to_group(pool=pool_name,
390
+                          group=group_name,
391
+                          namespace=group_namespace)
392
+
218 393
     kwargs = {}
219 394
     if pg_num:
220 395
         kwargs['pg_num'] = pg_num
@@ -570,6 +745,8 @@ def process_requests_v1(reqs):
570 745
             ret = handle_rgw_create_user(request=req, service=svc)
571 746
         elif op == "move-osd-to-bucket":
572 747
             ret = handle_put_osd_in_bucket(request=req, service=svc)
748
+        elif op == "add-permissions-to-key":
749
+            ret = handle_add_permissions_to_key(request=req, service=svc)
573 750
         else:
574 751
             msg = "Unknown operation '%s'" % op
575 752
             log(msg, level=ERROR)

Loading…
Cancel
Save