Move a disk with an action

A new action is implemented to allow an administrator to move an OSD into a
different Ceph bucket.

Change-Id: I6f9a2bfa12e97b4437cfac67747b62741de81e53

parent 3a3cbcb37b
commit 5945a350d2
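
In outline: for each device, the new add-disk action prepares the disk as an
OSD and, when a bucket name is supplied, appends a broker op asking the mon to
move the resulting OSD under that bucket. A minimal sketch of that op (the OSD
id and bucket name are made-up example values; the authoritative code is in
the diff below):

    # Op appended by actions/add_disk.py for each newly prepared device.
    op = {
        'op': 'move-osd-to-bucket',
        'osd': 'osd.3',        # example id, parsed from the OSD mount point
        'bucket': 'fast-ssd',  # example bucket name passed to the action
    }
    # On the mon side, handle_put_osd_in_bucket() turns this into roughly:
    #   ceph --id <service> osd crush set osd.3 <weight> root=fast-ssd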

actions.yaml | 13

@@ -36,4 +36,15 @@ replace-osd:
   required: [osd-number, replacement-device]
   additionalProperties: false
 list-disks:
   description: List the unmounted disk on the specified unit
+add-disk:
+  description: Add disk(s) to Ceph
+  params:
+    osd-devices:
+      type: string
+      description: The devices to format and set up as osd volumes.
+    bucket:
+      type: string
+      description: The name of the bucket in Ceph to add these devices into
+  required:
+    - osd-devices

actions/add-disk | 1 (new symbolic link)

@@ -0,0 +1 @@
add_disk.py

actions/add_disk.py | 73 (new executable file)

@@ -0,0 +1,73 @@
#!/usr/bin/python
#
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#  http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import psutil
import sys

sys.path.append('lib')
sys.path.append('hooks')

from charmhelpers.core.hookenv import (
    config,
    action_get,
)

from charmhelpers.contrib.storage.linux.ceph import (
    CephBrokerRq,
    send_request_if_needed,
)

import ceph

from ceph_hooks import (
    get_journal_devices,
)


def add_device(request, device_path, bucket=None):
    # Prepare the device as an OSD volume.
    ceph.osdize(device_path, config('osd-format'),
                get_journal_devices(), config('osd-reformat'),
                config('ignore-device-errors'),
                config('osd-encrypt'))
    # Make it fast!
    if config('autotune'):
        ceph.tune_dev(device_path)
    # Find the partition mounted from this device to recover its OSD id.
    mounts = [disk for disk in psutil.disk_partitions()
              if device_path in disk.device]
    # Only ask the mon to relocate the OSD if a target bucket was given.
    if mounts and bucket:
        osd = mounts[0]
        osd_id = osd.mountpoint.split('/')[-1].split('-')[-1]
        request.ops.append({
            'op': 'move-osd-to-bucket',
            'osd': "osd.{}".format(osd_id),
            'bucket': bucket})
    return request


def get_devices():
    return [
        os.path.realpath(path)
        for path in action_get('osd-devices').split(' ')]


if __name__ == "__main__":
    request = CephBrokerRq()
    for dev in get_devices():
        request = add_device(request=request,
                             device_path=dev,
                             bucket=action_get("bucket"))
    send_request_if_needed(request, relation='mon')
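
The OSD id in that op is recovered from the OSD's mount point. A minimal
sketch of the parsing above, assuming the conventional
/var/lib/ceph/osd/<cluster>-<id> mount layout (the path is a made-up example):

    mountpoint = '/var/lib/ceph/osd/ceph-3'
    osd_id = mountpoint.split('/')[-1].split('-')[-1]  # -> '3'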

@@ -24,7 +24,11 @@ from charmhelpers.core.hookenv import (
     INFO,
     ERROR,
 )
-from ceph import get_cephfs
+from ceph import (
+    get_cephfs,
+    get_osd_weight
+)
 from ceph_helpers import Crushmap
 from charmhelpers.contrib.storage.linux.ceph import (
     create_erasure_profile,
     delete_pool,

@@ -360,6 +364,36 @@ def handle_rgw_zone_set(request, service):
     os.unlink(infile.name)
 
 
+def handle_put_osd_in_bucket(request, service):
+    osd_id = request.get('osd')
+    target_bucket = request.get('bucket')
+    if not osd_id or not target_bucket:
+        msg = "Missing OSD ID or Bucket"
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+    crushmap = Crushmap()
+    try:
+        crushmap.ensure_bucket_is_present(target_bucket)
+        check_output(
+            [
+                'ceph',
+                '--id', service,
+                'osd',
+                'crush',
+                'set',
+                str(osd_id),
+                str(get_osd_weight(osd_id)),
+                "root={}".format(target_bucket)
+            ]
+        )
+
+    except Exception as exc:
+        msg = "Failed to move OSD " \
+              "{} into Bucket {} :: {}".format(osd_id, target_bucket, exc)
+        log(msg, level=ERROR)
+        return {'exit-code': 1, 'stderr': msg}
+
+
 def handle_rgw_create_user(request, service):
     user_id = request.get('rgw-uid')
     display_name = request.get('display-name')

@@ -534,6 +568,8 @@ def process_requests_v1(reqs):
             ret = handle_rgw_regionmap_default(request=req, service=svc)
         elif op == "rgw-create-user":
             ret = handle_rgw_create_user(request=req, service=svc)
+        elif op == "move-osd-to-bucket":
+            ret = handle_put_osd_in_bucket(request=req, service=svc)
         else:
             msg = "Unknown operation '%s'" % op
             log(msg, level=ERROR)

@@ -193,6 +193,11 @@ class Crushmap(object):
             log("load_crushmap error: {}".format(e))
             raise Exception("Failed to read Crushmap")
 
+    def ensure_bucket_is_present(self, bucket_name):
+        if bucket_name not in [bucket.name for bucket in self.buckets()]:
+            self.add_bucket(bucket_name)
+            self.save()
+
     def buckets(self):
         """Return a list of buckets that are in the Crushmap."""
         return self._buckets