Misc updates for ceph-volume support

Add support for the new 'ceph-volume' method of OSD block
device management for Ceph >= 12.2.4 (from which point the
ceph-volume tool is consistently supported in Ubuntu).

This change requires that block devices be prepared in advance
of calls to ceph-volume (which performs no partitioning or
setup of {PV,VG,LV}s itself).

Block devices are prepared with a GPT label and a single
partition; the primary data device for an OSD is prepared with
a single VG and a single LV for bluestore, or two LVs for
filestore.

Filestore:

  block /dev/sdb
    pv /dev/sdb1
        vg /dev/ceph-<OSD-UUID>
            lv /dev/ceph-<OSD-UUID>/osd-data-<OSD-UUID>
            lv /dev/ceph-<OSD-UUID>/osd-journal-<OSD-UUID>

Bluestore:

  block /dev/sdb
    pv /dev/sdb1
        vg /dev/ceph-<OSD-UUID>
            lv /dev/ceph-<OSD-UUID>/osd-block-<OSD-UUID>

If separate Filestore journal or Bluestore DB and WAL devices
have been configured, appropriate {PV,VG,LV} configuration is
created on those block devices.

Filestore with Journal device:

  block /dev/sdb
    pv /dev/sdb1
        vg /dev/ceph-<OSD-UUID>
            lv /dev/ceph-<OSD-UUID>/osd-data-<OSD-UUID>
  block /dev/sdc
    pv /dev/sdc1
        vg /dev/ceph-journal-<UUID>
            lv /dev/ceph-journal-<UUID>/osd-journal-<OSD-UUID>

Bluestore with DB and WAL devices:

  block /dev/sdb
    pv /dev/sdb1
        vg /dev/ceph-<OSD-UUID>
            lv /dev/ceph-<OSD-UUID>/osd-block-<OSD-UUID>
  block /dev/sdc
    pv /dev/sdc1
        vg /dev/ceph-db-<UUID>
            lv /dev/ceph-db-<UUID>/osd-db-<OSD-UUID>
  block /dev/sdd
    pv /dev/sdd1
        vg /dev/ceph-wal-<UUID>
            lv /dev/ceph-wal-<UUID>/osd-wal-<OSD-UUID>
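
As a minimal sketch, the provisioning of a bluestore data device
boils down to the charmhelpers lvm calls used in this change
(device name and fsid are illustrative):

  from charmhelpers.contrib.storage.linux import lvm

  osd_fsid = '22b371a5-0db9-4154-b011-23f8f03c4d8c'
  pv_dev = '/dev/sdb1'  # single GPT partition created in advance
  vg_name = 'ceph-{}'.format(osd_fsid)
  lvm.create_lvm_physical_volume(pv_dev)
  lvm.create_lvm_volume_group(vg_name, pv_dev)
  # a size of None allocates 100% of the VG to the LV
  lvm.create_logical_volume('osd-block-{}'.format(osd_fsid),
                            vg_name, None)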

ceph-volume makes extensive use of LVM tags to encode required
Ceph information directly into the LVM volume metadata; this
can be viewed using 'ceph-volume lvm list'.
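
For example (a sketch; assumes the --format json output option
is available in this ceph-volume release):

  import json
  import subprocess

  # dump ceph-volume managed LVs with their LVM tag metadata
  out = subprocess.check_output(
      ['ceph-volume', 'lvm', 'list', '--format', 'json'])
  print(json.dumps(json.loads(out.decode('utf-8')), indent=2))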

ceph-volume managed devices are prepared and activated using
the 'create' subcommand; this differs somewhat from ceph-disk,
where OSDs are activated using udev triggers.
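
For a simple bluestore OSD the resulting command, as assembled
by _ceph_volume below and executed via check_call, takes the
form:

  ['ceph-volume', 'lvm', 'create',
   '--osd-fsid', '<OSD-UUID>',
   '--bluestore',
   '--data', 'ceph-<OSD-UUID>/osd-block-<OSD-UUID>']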

This review also switches to using the charmhelpers zap_disk
function to clean disks prior to use by Ceph; this resolves an
outstanding issue with the use of Ceph lockbox encryption with
ceph-disk. ceph-volume continues to support the encrypt config
option, applying LUKS-based dm-crypt encryption to the LVs
backing an OSD, with keys stored directly in the MON cluster.

This review also drops support for behaviour specific to older
Ceph releases which are no longer supported in Ubuntu (as the
author was not prepared to write new unit test cases for code
that never gets run).

Change-Id: I1675b67d364ae6042129a8a717d4bdffff5bde92
James Page 2018-03-28 08:27:37 +01:00
parent 23f0c12c47
commit 68c054fc66
2 changed files with 989 additions and 46 deletions


@@ -15,6 +15,7 @@
import collections
import ctypes
import errno
import glob
import json
import os
import pyudev
@@ -25,6 +26,7 @@ import subprocess
import sys
import time
import shutil
import uuid
from datetime import datetime
@@ -73,6 +75,7 @@ from charmhelpers.contrib.storage.linux.utils import (
from charmhelpers.contrib.openstack.utils import (
get_os_codename_install_source,
)
from charmhelpers.contrib.storage.linux import lvm
CEPH_BASE_DIR = os.path.join(os.sep, 'var', 'lib', 'ceph')
OSD_BASE_DIR = os.path.join(CEPH_BASE_DIR, 'osd')
@@ -1406,17 +1409,36 @@ def get_partitions(dev):
return []
def find_least_used_utility_device(utility_devices):
def get_lvs(dev):
"""
List logical volumes for the provided block device
:param: dev: Full path to block device.
:raises subprocess.CalledProcessError: in the event that any supporting
operation failed.
:returns: list: List of logical volumes provided by the block device
"""
pv_dev = _partition_name(dev)
if not lvm.is_lvm_physical_volume(pv_dev):
return []
vg_name = lvm.list_lvm_volume_group(pv_dev)
return lvm.list_logical_volumes('vg_name={}'.format(vg_name))
def find_least_used_utility_device(utility_devices, lvs=False):
"""
Find the utility device with the smallest number of partitions
among the devices in the supplied list.
:param: utility_devices: A list of devices to be used for filestore
journal or bluestore wal or db.
:param: lvs: flag to indicate whether inspection should be based on
LVM LVs
:returns: string device name
"""
usages = map(lambda a: (len(get_partitions(a)), a), utility_devices)
if lvs:
usages = map(lambda a: (len(get_lvs(a)), a), utility_devices)
else:
usages = map(lambda a: (len(get_partitions(a)), a), utility_devices)
least = min(usages, key=lambda t: t[0])
return least[1]
@@ -1467,49 +1489,28 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
log('Looks like {} is in use, skipping.'.format(dev))
return
status_set('maintenance', 'Initializing device {}'.format(dev))
cmd = ['ceph-disk', 'prepare']
# Later versions of ceph support more options
if cmp_pkgrevno('ceph', '0.60') >= 0:
if encrypt:
cmd.append('--dmcrypt')
if cmp_pkgrevno('ceph', '0.48.3') >= 0:
if osd_format and not bluestore:
cmd.append('--fs-type')
cmd.append(osd_format)
if is_active_bluestore_device(dev):
log('{} is in use as an active bluestore block device,'
' skipping.'.format(dev))
return
if reformat_osd:
cmd.append('--zap-disk')
if reformat_osd:
zap_disk(dev)
# NOTE(jamespage): enable experimental bluestore support
if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
cmd.append('--bluestore')
wal = get_devices('bluestore-wal')
if wal:
cmd.append('--block.wal')
least_used_wal = find_least_used_utility_device(wal)
cmd.append(least_used_wal)
db = get_devices('bluestore-db')
if db:
cmd.append('--block.db')
least_used_db = find_least_used_utility_device(db)
cmd.append(least_used_db)
elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
cmd.append('--filestore')
cmd.append(dev)
if osd_journal:
least_used = find_least_used_utility_device(osd_journal)
cmd.append(least_used)
if cmp_pkgrevno('ceph', '12.2.4') >= 0:
cmd = _ceph_volume(dev,
osd_journal,
encrypt,
bluestore)
else:
# Just provide the device - no other options
# for older versions of ceph
cmd.append(dev)
if reformat_osd:
zap_disk(dev)
cmd = _ceph_disk(dev,
osd_format,
osd_journal,
encrypt,
bluestore)
try:
status_set('maintenance', 'Initializing device {}'.format(dev))
log("osdize cmd: {}".format(cmd))
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
@@ -1520,6 +1521,289 @@ def osdize_dev(dev, osd_format, osd_journal, reformat_osd=False,
raise
def _ceph_disk(dev, osd_format, osd_journal, encrypt=False, bluestore=False):
"""
Prepare a device for usage as a Ceph OSD using ceph-disk
:param: dev: Full path to use for OSD block device setup
:param: osd_format: Filesystem type for OSD data device (filestore only)
:param: osd_journal: List of block devices to use for OSD journals
:param: encrypt: Use block device encryption (unsupported)
:param: bluestore: Use bluestore storage for OSD
:returns: list. 'ceph-disk' command and required parameters for
execution by check_call
"""
cmd = ['ceph-disk', 'prepare']
if encrypt:
cmd.append('--dmcrypt')
if osd_format and not bluestore:
cmd.append('--fs-type')
cmd.append(osd_format)
# NOTE(jamespage): enable experimental bluestore support
if cmp_pkgrevno('ceph', '10.2.0') >= 0 and bluestore:
cmd.append('--bluestore')
wal = get_devices('bluestore-wal')
if wal:
cmd.append('--block.wal')
least_used_wal = find_least_used_utility_device(wal)
cmd.append(least_used_wal)
db = get_devices('bluestore-db')
if db:
cmd.append('--block.db')
least_used_db = find_least_used_utility_device(db)
cmd.append(least_used_db)
elif cmp_pkgrevno('ceph', '12.1.0') >= 0 and not bluestore:
cmd.append('--filestore')
cmd.append(dev)
if osd_journal:
least_used = find_least_used_utility_device(osd_journal)
cmd.append(least_used)
return cmd
def _ceph_volume(dev, osd_journal, encrypt=False, bluestore=False):
"""
Prepare and activate a device for usage as a Ceph OSD using ceph-volume.
This also includes creation of all PVs, VGs and LVs required to
support the initialization of the OSD.
:param: dev: Full path to use for OSD block device setup
:param: osd_journal: List of block devices to use for OSD journals
:param: encrypt: Use block device encryption
:param: bluestore: Use bluestore storage for OSD
:raises subprocess.CalledProcessError: in the event that any supporting
LVM operation failed.
:returns: list. 'ceph-volume' command and required parameters for
execution by check_call
"""
cmd = ['ceph-volume', 'lvm', 'create']
osd_fsid = str(uuid.uuid4())
cmd.append('--osd-fsid')
cmd.append(osd_fsid)
if bluestore:
cmd.append('--bluestore')
main_device_type = 'block'
else:
cmd.append('--filestore')
main_device_type = 'data'
if encrypt:
cmd.append('--dmcrypt')
# On-disk journal volume creation
if not osd_journal and not bluestore:
journal_lv_type = 'journal'
cmd.append('--journal')
cmd.append(_allocate_logical_volume(
dev,
journal_lv_type,
osd_fsid,
size='{}M'.format(calculate_volume_size('journal')))
)
cmd.append('--data')
cmd.append(_allocate_logical_volume(dev,
main_device_type,
osd_fsid))
if bluestore:
for extra_volume in ('wal', 'db'):
devices = get_devices('bluestore-{}'.format(extra_volume))
if devices:
cmd.append('--block.{}'.format(extra_volume))
least_used = find_least_used_utility_device(devices,
lvs=True)
cmd.append(_allocate_logical_volume(
least_used,
extra_volume,
osd_fsid,
size='{}M'.format(calculate_volume_size(extra_volume)),
shared=True)
)
elif osd_journal:
cmd.append('--journal')
least_used = find_least_used_utility_device(osd_journal,
lvs=True)
cmd.append(_allocate_logical_volume(
least_used,
'journal',
osd_fsid,
size='{}M'.format(calculate_volume_size('journal')),
shared=True)
)
return cmd
def _partition_name(dev):
"""
Derive the first partition name for a block device
:param: dev: Full path to block device.
:returns: str: Full path to first partition on block device.
"""
if dev[-1].isdigit():
return '{}p1'.format(dev)
else:
return '{}1'.format(dev)
# TODO(jamespage): Deal with lockbox encrypted bluestore devices.
def is_active_bluestore_device(dev):
"""
Determine whether provided device is part of an active
bluestore based OSD (as its block component).
:param: dev: Full path to block device to check for Bluestore usage.
:returns: boolean: indicating whether device is in active use.
"""
pv_dev = _partition_name(dev)
if not lvm.is_lvm_physical_volume(pv_dev):
return False
vg_name = lvm.list_lvm_volume_group(pv_dev)
lv_name = lvm.list_logical_volumes('vg_name={}'.format(vg_name))[0]
block_symlinks = glob.glob('/var/lib/ceph/osd/ceph-*/block')
for block_candidate in block_symlinks:
if os.path.islink(block_candidate):
target = os.readlink(block_candidate)
if target.endswith(lv_name):
return True
return False
def get_conf(variable):
"""
Get the value of the given configuration variable from the
cluster.
:param variable: ceph configuration variable
:returns: str. configured value for provided variable
"""
return subprocess.check_output([
'ceph-osd',
'--show-config-value={}'.format(variable),
]).strip()
def calculate_volume_size(lv_type):
"""
Determine the configured size for Bluestore DB/WAL or
Filestore Journal devices
:param lv_type: volume type (db, wal or journal)
:raises KeyError: if invalid lv_type is supplied
:returns: int. Configured size in megabytes for volume type
"""
# lv_type -> ceph configuration option
_config_map = {
'db': 'bluestore_block_db_size',
'wal': 'bluestore_block_wal_size',
'journal': 'osd_journal_size',
}
# default sizes in MB
_default_size = {
'db': 1024,
'wal': 576,
'journal': 1024,
}
# conversion of ceph config units to MB
_units = {
'db': 1048576, # Bytes -> MB
'wal': 1048576, # Bytes -> MB
'journal': 1, # Already in MB
}
configured_size = get_conf(_config_map[lv_type])
if configured_size is None or int(configured_size) == 0:
return _default_size[lv_type]
else:
return int(configured_size) / _units[lv_type]
def _initialize_disk(dev):
"""
Initialize a raw block device with a single partition
consuming 100% of the available disk space.
Function assumes that block device has already been wiped.
:param: dev: path to block device to initialize
:raises: subprocess.CalledProcessError: if any parted calls fail
:returns: str: Full path to new partition.
"""
partition = _partition_name(dev)
if not os.path.exists(partition):
subprocess.check_call([
'parted', '--script',
dev,
'mklabel',
'gpt',
])
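# create a single partition spanning 1 to 100% of the device
# (bare numbers default to megabytes in parted)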
subprocess.check_call([
'parted', '--script',
dev,
'mkpart',
'primary', '1', '100%',
])
return partition
def _allocate_logical_volume(dev, lv_type, osd_fsid,
size=None, shared=False):
"""
Allocate a logical volume from a block device, ensuring any
required initialization and setup of PVs and VGs to support
the LV.
:param: dev: path to block device to allocate from.
:param: lv_type: logical volume type to create
(data, block, journal, wal, db)
:param: osd_fsid: UUID of the OSD associated with the LV
:param: size: Size in LVM format for the device;
if unset 100% of VG
:param: shared: Shared volume group (journal, wal, db)
:raises subprocess.CalledProcessError: in the event that any supporting
LVM or parted operation fails.
:returns: str: String in the format 'vg_name/lv_name'.
"""
lv_name = "osd-{}-{}".format(lv_type, osd_fsid)
current_volumes = lvm.list_logical_volumes()
pv_dev = _initialize_disk(dev)
vg_name = None
if not lvm.is_lvm_physical_volume(pv_dev):
lvm.create_lvm_physical_volume(pv_dev)
if shared:
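# Journal/WAL/DB devices may host LVs for multiple OSDs, so shared
# VGs are named with a fresh UUID rather than a single OSD fsid.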
vg_name = 'ceph-{}-{}'.format(lv_type,
str(uuid.uuid4()))
else:
vg_name = 'ceph-{}'.format(osd_fsid)
lvm.create_lvm_volume_group(vg_name, pv_dev)
else:
vg_name = lvm.list_lvm_volume_group(pv_dev)
if lv_name not in current_volumes:
lvm.create_logical_volume(lv_name, vg_name, size)
return "{}/{}".format(vg_name, lv_name)
def osdize_dir(path, encrypt=False, bluestore=False):
"""Ask ceph-disk to prepare a directory to become an osd.


@@ -68,20 +68,52 @@ class CephTestCase(unittest.TestCase):
])
@patch.object(utils.subprocess, 'check_call')
@patch.object(utils, 'zap_disk')
@patch.object(utils, '_ceph_disk')
@patch.object(utils, 'is_active_bluestore_device')
@patch.object(utils.os.path, 'exists')
@patch.object(utils, 'is_device_mounted')
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'is_block_device')
def test_osdize_dev(self, _is_blk, _cmp, _mounted, _exists, _call):
"""Test that the dev osd is initialized correctly"""
def test_osdize_dev_ceph_disk(self, _is_blk, _cmp, _mounted, _exists,
_is_active_bluestore_device, _ceph_disk,
_zap_disk, _check_call):
"""Test that _ceph_disk is called for < Luminous 12.2.4"""
_is_blk.return_value = True
_mounted.return_value = False
_exists.return_value = True
_cmp.return_value = True
_cmp.return_value = -1
_ceph_disk.return_value = ['ceph-disk', 'prepare']
_is_active_bluestore_device.return_value = False
utils.osdize('/dev/sdb', osd_format='xfs', osd_journal=None,
reformat_osd=True, bluestore=False)
_call.assert_called_with(['ceph-disk', 'prepare', '--fs-type', 'xfs',
'--zap-disk', '--filestore', '/dev/sdb'])
_ceph_disk.assert_called_with('/dev/sdb', 'xfs', None, False, False)
_check_call.assert_called_with(['ceph-disk', 'prepare'])
_zap_disk.assert_called_once()
@patch.object(utils.subprocess, 'check_call')
@patch.object(utils, 'zap_disk')
@patch.object(utils, '_ceph_volume')
@patch.object(utils, 'is_active_bluestore_device')
@patch.object(utils.os.path, 'exists')
@patch.object(utils, 'is_device_mounted')
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'is_block_device')
def test_osdize_dev_ceph_volume(self, _is_blk, _cmp, _mounted, _exists,
_is_active_bluestore_device, _ceph_volume,
_zap_disk, _check_call):
"""Test that _ceph_volume is called for >= Luminous 12.2.4"""
_is_blk.return_value = True
_mounted.return_value = False
_exists.return_value = True
_cmp.return_value = 1
_ceph_volume.return_value = ['ceph-volume', 'prepare']
_is_active_bluestore_device.return_value = False
utils.osdize('/dev/sdb', osd_format='xfs', osd_journal=None,
reformat_osd=True, bluestore=False)
_ceph_volume.assert_called_with('/dev/sdb', None, False, False)
_check_call.assert_called_with(['ceph-volume', 'prepare'])
_zap_disk.assert_called_once()
@patch.object(utils.subprocess, 'check_call')
@patch.object(utils.os.path, 'exists')
@@ -537,6 +569,536 @@ class CephTestCase(unittest.TestCase):
])
self.assertEqual(utils.pretty_print_upgrade_paths(), expected)
@patch.object(utils.subprocess, 'check_output')
def test_get_conf(self, _check_output):
_check_output.return_value = '12345\n'
self.assertEqual(utils.get_conf('bluestore_block_db_size'),
'12345')
_check_output.assert_called_with([
'ceph-osd',
'--show-config-value=bluestore_block_db_size',
])
def test_partition_name(self):
self.assertEqual(utils._partition_name('/dev/sdb'),
'/dev/sdb1')
self.assertEqual(utils._partition_name('/dev/mmcblk0'),
'/dev/mmcblk0p1')
class CephVolumeSizeCalculatorTestCase(unittest.TestCase):
@patch.object(utils, 'get_conf')
def test_calculate_volume_size_journal(self, _get_conf):
_get_conf.return_value = 0
self.assertEqual(utils.calculate_volume_size('journal'),
1024)
_get_conf.assert_called_with('osd_journal_size')
_get_conf.return_value = 2048
self.assertEqual(utils.calculate_volume_size('journal'),
2048)
@patch.object(utils, 'get_conf')
def test_calculate_volume_size_db(self, _get_conf):
_get_conf.return_value = 0
self.assertEqual(utils.calculate_volume_size('db'),
1024)
_get_conf.assert_called_with('bluestore_block_db_size')
_get_conf.return_value = 2048 * 1048576
self.assertEqual(utils.calculate_volume_size('db'),
2048)
@patch.object(utils, 'get_conf')
def test_calculate_volume_size_wal(self, _get_conf):
_get_conf.return_value = 0
self.assertEqual(utils.calculate_volume_size('wal'),
576)
_get_conf.assert_called_with('bluestore_block_wal_size')
_get_conf.return_value = 512 * 1048576
self.assertEqual(utils.calculate_volume_size('wal'),
512)
def test_calculate_volume_size_invalid(self):
with self.assertRaises(KeyError):
utils.calculate_volume_size('invalid')
class CephInitializeDiskTestCase(unittest.TestCase):
@patch.object(utils.subprocess, 'check_call')
@patch.object(utils.os.path, 'exists')
def test_initialize_disk(self, _exists, _check_call):
_exists.return_value = False
self.assertEqual(utils._initialize_disk('/dev/sdb'),
'/dev/sdb1')
_check_call.assert_has_calls([
call([
'parted', '--script',
'/dev/sdb',
'mklabel',
'gpt',
]),
call([
'parted', '--script',
'/dev/sdb',
'mkpart',
'primary', '1', '100%',
]),
])
@patch.object(utils.subprocess, 'check_call')
@patch.object(utils.os.path, 'exists')
def test_initialize_disk_exists(self, _exists, _check_call):
_exists.return_value = True
self.assertEqual(utils._initialize_disk('/dev/sdb'),
'/dev/sdb1')
_check_call.assert_not_called()
class CephActiveBlueStoreDeviceTestCase(unittest.TestCase):
_test_pvs = {
'/dev/sdb1': 'ceph-1234',
'/dev/sdc1': 'ceph-5678',
'/dev/sde1': 'ceph-9101',
}
_test_vgs = {
'ceph-1234': ['osd-block-1234'],
'ceph-5678': ['osd-block-5678'],
'ceph-9101': ['osd-block-9101'],
}
_test_links = {
'/var/lib/ceph/osd/ceph-4/block': '/dev/ceph-1234/osd-block-1234',
'/var/lib/ceph/osd/ceph-6/block': '/dev/ceph-5678/osd-block-5678',
}
@patch.object(utils, 'glob')
@patch.object(utils, 'os')
@patch.object(utils, 'lvm')
def _test_active_bluestore_device(self, _lvm, _os, _glob, device, active):
_os.path.islink.return_value = True
_glob.glob.return_value = self._test_links.keys()
_lvm.is_lvm_physical_volume.side_effect = (
lambda dev: dev in self._test_pvs
)
_lvm.list_lvm_volume_group.side_effect = (
lambda pv: self._test_pvs.get(pv)
)
_lvm.list_logical_volumes.side_effect = (
lambda vg: self._test_vgs.get(vg.lstrip('vg_name='), [])
)
_os.readlink.side_effect = (
lambda link: self._test_links.get(link)
)
pv_dev = utils._partition_name(device)
self.assertEqual(utils.is_active_bluestore_device(device),
active)
_lvm.is_lvm_physical_volume.assert_called_with(pv_dev)
if pv_dev in self._test_pvs:
_lvm.list_lvm_volume_group.assert_called_with(pv_dev)
_lvm.list_logical_volumes.assert_called_with(
'vg_name={}'.format(self._test_pvs.get(pv_dev))
)
_glob.glob.assert_called_with('/var/lib/ceph/osd/ceph-*/block')
else:
_lvm.list_lvm_volume_group.assert_not_called()
_lvm.list_logical_volumes.assert_not_called()
_glob.glob.assert_not_called()
def test_active_bluestore_device_active(self):
self._test_active_bluestore_device(device='/dev/sdb', active=True)
self._test_active_bluestore_device(device='/dev/sdc', active=True)
def test_active_bluestore_device_inactive_not_pv(self):
self._test_active_bluestore_device(device='/dev/sdd', active=False)
def test_active_bluestore_device_inactive_not_inuse(self):
self._test_active_bluestore_device(device='/dev/sde', active=False)
class CephAllocateVolumeTestCase(unittest.TestCase):
_lvs = ['osd-data-1234', 'osd-block-1234', 'osd-journal-1234']
_vgs = {
'/dev/sdb1': 'ceph-1234'
}
@patch.object(utils, '_initialize_disk')
@patch.object(utils.uuid, 'uuid4')
@patch.object(utils, 'lvm')
def _test_allocate_logical_volume(self, _lvm, _uuid4, _initialize_disk,
dev, lv_type, osd_fsid,
size=None, shared=False):
test_uuid = '1234-1234-1234-1234'
pv_dev = utils._partition_name(dev)
_lvm.list_logical_volumes.return_value = self._lvs
_initialize_disk.return_value = pv_dev
_lvm.is_lvm_physical_volume.side_effect = lambda pv: pv in self._vgs
_lvm.list_lvm_volume_group.side_effect = lambda pv: self._vgs.get(pv)
_uuid4.return_value = test_uuid
lv_name = 'osd-{}-{}'.format(lv_type, osd_fsid)
if shared:
vg_name = 'ceph-{}-{}'.format(lv_type, test_uuid)
else:
vg_name = 'ceph-{}'.format(osd_fsid)
self.assertEqual(utils._allocate_logical_volume(dev, lv_type, osd_fsid,
size, shared),
'{}/{}'.format(vg_name, lv_name))
if pv_dev not in self._vgs:
_lvm.create_lvm_physical_volume.assert_called_with(pv_dev)
_lvm.create_lvm_volume_group.assert_called_with(vg_name, pv_dev)
else:
_lvm.create_lvm_physical_volume.assert_not_called()
_lvm.create_lvm_volume_group.assert_not_called()
_lvm.list_lvm_volume_group.assert_called_with(pv_dev)
if lv_name not in self._lvs:
_lvm.create_logical_volume.assert_called_with(lv_name, vg_name,
size)
else:
_lvm.create_logical_volume.assert_not_called()
_initialize_disk.assert_called_with(dev)
def test_allocate_lv_already_pv(self):
self._test_allocate_logical_volume(dev='/dev/sdb', lv_type='data',
osd_fsid='1234')
def test_allocate_lv_new_pv(self):
self._test_allocate_logical_volume(dev='/dev/sdc', lv_type='data',
osd_fsid='5678')
def test_allocate_lv_shared_type(self):
self._test_allocate_logical_volume(dev='/dev/sdc', lv_type='wal',
osd_fsid='5678', shared=True)
def test_allocate_lv_already_exists(self):
self._test_allocate_logical_volume(dev='/dev/sdd', lv_type='data',
osd_fsid='1234')
class CephDiskTestCase(unittest.TestCase):
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
def test_ceph_disk_filestore(self, _get_devices,
_find_least_used_utility_device,
_cmp_pkgrevno):
# >= Jewel < Luminous RC
_cmp_pkgrevno.side_effect = [1, -1]
_get_devices.return_value = []
self.assertEqual(
utils._ceph_disk('/dev/sdb',
osd_format='xfs',
osd_journal=None,
encrypt=False,
bluestore=False),
['ceph-disk', 'prepare',
'--fs-type', 'xfs',
'/dev/sdb']
)
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
def test_ceph_disk_filestore_luminous(self, _get_devices,
_find_least_used_utility_device,
_cmp_pkgrevno):
# >= Jewel
_cmp_pkgrevno.return_value = 1
_get_devices.return_value = []
self.assertEqual(
utils._ceph_disk('/dev/sdb',
osd_format='xfs',
osd_journal=None,
encrypt=False,
bluestore=False),
['ceph-disk', 'prepare',
'--fs-type', 'xfs',
'--filestore', '/dev/sdb']
)
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
def test_ceph_disk_filestore_journal(self, _get_devices,
_find_least_used_utility_device,
_cmp_pkgrevno):
# >= Jewel
_cmp_pkgrevno.return_value = 1
_get_devices.return_value = []
_find_least_used_utility_device.side_effect = \
lambda x, lvs=False: x[0]
self.assertEqual(
utils._ceph_disk('/dev/sdb',
osd_format='xfs',
osd_journal=['/dev/sdc'],
encrypt=False,
bluestore=False),
['ceph-disk', 'prepare',
'--fs-type', 'xfs',
'--filestore', '/dev/sdb',
'/dev/sdc']
)
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
def test_ceph_disk_bluestore(self, _get_devices,
_find_least_used_utility_device,
_cmp_pkgrevno):
# >= Jewel
_cmp_pkgrevno.return_value = 1
_get_devices.return_value = []
_find_least_used_utility_device.side_effect = \
lambda x, lvs=False: x[0]
self.assertEqual(
utils._ceph_disk('/dev/sdb',
osd_format='xfs',
osd_journal=None,
encrypt=False,
bluestore=True),
['ceph-disk', 'prepare',
'--bluestore', '/dev/sdb']
)
@patch.object(utils, 'cmp_pkgrevno')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
def test_ceph_disk_bluestore_dbwal(self, _get_devices,
_find_least_used_utility_device,
_cmp_pkgrevno):
# >= Jewel
_cmp_pkgrevno.return_value = 1
_bluestore_devs = {
'bluestore-db': ['/dev/sdc'],
'bluestore-wal': ['/dev/sdd'],
}
_get_devices.side_effect = lambda x: _bluestore_devs.get(x, [])
_find_least_used_utility_device.side_effect = \
lambda x, lvs=False: x[0]
self.assertEqual(
utils._ceph_disk('/dev/sdb',
osd_format='xfs',
osd_journal=None,
encrypt=False,
bluestore=True),
['ceph-disk', 'prepare',
'--bluestore',
'--block.wal', '/dev/sdd',
'--block.db', '/dev/sdc',
'/dev/sdb']
)
class CephVolumeTestCase(unittest.TestCase):
_osd_uuid = '22b371a5-0db9-4154-b011-23f8f03c4d8c'
@patch.object(utils.uuid, 'uuid4')
@patch.object(utils, 'calculate_volume_size')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
@patch.object(utils, '_allocate_logical_volume')
def test_ceph_volume_filestore(self, _allocate_logical_volume,
_get_devices,
_find_least_used_utility_device,
_calculate_volume_size, _uuid4):
_get_devices.return_value = []
_calculate_volume_size.return_value = 1024
_uuid4.return_value = self._osd_uuid
_allocate_logical_volume.side_effect = (
lambda dev, lv_type, osd_fsid, size=None, shared=False: (
'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
type=lv_type)
)
)
self.assertEqual(
utils._ceph_volume('/dev/sdb',
osd_journal=None,
encrypt=False,
bluestore=False),
['ceph-volume',
'lvm',
'create',
'--osd-fsid',
self._osd_uuid,
'--filestore',
'--journal',
('ceph-{fsid}/'
'osd-journal-{fsid}').format(fsid=self._osd_uuid),
'--data',
('ceph-{fsid}/'
'osd-data-{fsid}').format(fsid=self._osd_uuid)]
)
_allocate_logical_volume.assert_has_calls([
call('/dev/sdb', 'journal', self._osd_uuid, size='1024M'),
call('/dev/sdb', 'data', self._osd_uuid),
])
_find_least_used_utility_device.assert_not_called()
_calculate_volume_size.assert_called_with('journal')
@patch.object(utils.uuid, 'uuid4')
@patch.object(utils, 'calculate_volume_size')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
@patch.object(utils, '_allocate_logical_volume')
def test_ceph_volume_filestore_db_and_wal(self, _allocate_logical_volume,
_get_devices,
_find_least_used_utility_device,
_calculate_volume_size, _uuid4):
_find_least_used_utility_device.side_effect = \
lambda x, lvs=False: x[0]
_calculate_volume_size.return_value = 1024
_uuid4.return_value = self._osd_uuid
_allocate_logical_volume.side_effect = (
lambda dev, lv_type, osd_fsid, size=None, shared=False: (
'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
type=lv_type)
)
)
self.assertEqual(
utils._ceph_volume('/dev/sdb',
osd_journal=['/dev/sdc'],
encrypt=False,
bluestore=False),
['ceph-volume',
'lvm',
'create',
'--osd-fsid',
self._osd_uuid,
'--filestore',
'--data',
('ceph-{fsid}/'
'osd-data-{fsid}').format(fsid=self._osd_uuid),
'--journal',
('ceph-{fsid}/'
'osd-journal-{fsid}').format(fsid=self._osd_uuid)]
)
_allocate_logical_volume.assert_has_calls([
call('/dev/sdb', 'data', self._osd_uuid),
call('/dev/sdc', 'journal', self._osd_uuid,
shared=True, size='1024M'),
])
_find_least_used_utility_device.assert_has_calls([
call(['/dev/sdc'], lvs=True),
])
_calculate_volume_size.assert_has_calls([
call('journal'),
])
@patch.object(utils.uuid, 'uuid4')
@patch.object(utils, 'calculate_volume_size')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
@patch.object(utils, '_allocate_logical_volume')
def test_ceph_volume_bluestore(self, _allocate_logical_volume,
_get_devices,
_find_least_used_utility_device,
_calculate_volume_size, _uuid4):
_get_devices.return_value = []
_calculate_volume_size.return_value = 1024
_uuid4.return_value = self._osd_uuid
_allocate_logical_volume.side_effect = (
lambda dev, lv_type, osd_fsid, size=None, shared=False: (
'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
type=lv_type)
)
)
self.assertEqual(
utils._ceph_volume('/dev/sdb',
osd_journal=None,
encrypt=False,
bluestore=True),
['ceph-volume',
'lvm',
'create',
'--osd-fsid',
self._osd_uuid,
'--bluestore',
'--data',
('ceph-{fsid}/'
'osd-block-{fsid}').format(fsid=self._osd_uuid)]
)
_allocate_logical_volume.assert_has_calls([
call('/dev/sdb', 'block', self._osd_uuid),
])
_find_least_used_utility_device.assert_not_called()
_calculate_volume_size.assert_not_called()
@patch.object(utils.uuid, 'uuid4')
@patch.object(utils, 'calculate_volume_size')
@patch.object(utils, 'find_least_used_utility_device')
@patch.object(utils, 'get_devices')
@patch.object(utils, '_allocate_logical_volume')
def test_ceph_volume_bluestore_db_and_wal(self, _allocate_logical_volume,
_get_devices,
_find_least_used_utility_device,
_calculate_volume_size, _uuid4):
_bluestore_devs = {
'bluestore-db': ['/dev/sdc'],
'bluestore-wal': ['/dev/sdd'],
}
_get_devices.side_effect = lambda x: _bluestore_devs.get(x, [])
_find_least_used_utility_device.side_effect = \
lambda x, lvs=False: x[0]
_calculate_volume_size.return_value = 1024
_uuid4.return_value = self._osd_uuid
_allocate_logical_volume.side_effect = (
lambda dev, lv_type, osd_fsid, size=None, shared=False: (
'ceph-{fsid}/osd-{type}-{fsid}'.format(fsid=osd_fsid,
type=lv_type)
)
)
self.assertEqual(
utils._ceph_volume('/dev/sdb',
osd_journal=None,
encrypt=False,
bluestore=True),
['ceph-volume',
'lvm',
'create',
'--osd-fsid',
self._osd_uuid,
'--bluestore',
'--data',
('ceph-{fsid}/'
'osd-block-{fsid}').format(fsid=self._osd_uuid),
'--block.wal',
('ceph-{fsid}/'
'osd-wal-{fsid}').format(fsid=self._osd_uuid),
'--block.db',
('ceph-{fsid}/'
'osd-db-{fsid}').format(fsid=self._osd_uuid)]
)
_allocate_logical_volume.assert_has_calls([
call('/dev/sdb', 'block', self._osd_uuid),
call('/dev/sdd', 'wal', self._osd_uuid,
shared=True, size='1024M'),
call('/dev/sdc', 'db', self._osd_uuid,
shared=True, size='1024M'),
])
_find_least_used_utility_device.assert_has_calls([
call(['/dev/sdd'], lvs=True),
call(['/dev/sdc'], lvs=True),
])
_calculate_volume_size.assert_has_calls([
call('wal'),
call('db'),
])
class CephVersionTestCase(unittest.TestCase):
@patch.object(utils, 'get_os_codename_install_source')
@@ -559,3 +1121,100 @@ class CephVersionTestCase(unittest.TestCase):
get_os_codename_install_source.return_value = 'ocata'
self.assertEqual(utils.resolve_ceph_version(
'cloud:xenial-ocata'), 'jewel')
class CephFindLeastUsedDeviceTestCase(unittest.TestCase):
_parts = {
'/dev/sdb': ['1', '2', '3'],
'/dev/sdc': ['1']
}
@patch.object(utils, 'get_partitions')
def test_find_least_used_utility_device(self, _get_partitions):
_get_partitions.side_effect = lambda dev: self._parts.get(dev, [])
self.assertEqual(
utils.find_least_used_utility_device(['/dev/sdb',
'/dev/sdx',
'/dev/sdc']),
'/dev/sdx'
)
self.assertEqual(
utils.find_least_used_utility_device(['/dev/sdb', '/dev/sdc']),
'/dev/sdc'
)
self.assertEqual(
utils.find_least_used_utility_device(['/dev/sdb']),
'/dev/sdb'
)
_get_partitions.assert_called()
@patch.object(utils, 'get_lvs')
def test_find_least_used_utility_device_lvs(self, _get_lvs):
_get_lvs.side_effect = lambda dev: self._parts.get(dev, [])
self.assertEqual(
utils.find_least_used_utility_device(['/dev/sdb',
'/dev/sdx',
'/dev/sdc'],
lvs=True),
'/dev/sdx'
)
self.assertEqual(
utils.find_least_used_utility_device(['/dev/sdb', '/dev/sdc'],
lvs=True),
'/dev/sdc'
)
self.assertEqual(
utils.find_least_used_utility_device(['/dev/sdb'],
lvs=True),
'/dev/sdb'
)
_get_lvs.assert_called()
class CephGetLVSTestCase(unittest.TestCase):
_lvs = {
'testvg': ['lv1', 'lv2', 'lv3']
}
@patch.object(utils, 'lvm')
def test_get_lvs(self, _lvm):
_lvm.is_lvm_physical_volume.return_value = True
_lvm.list_lvm_volume_group.return_value = 'testvg'
_lvm.list_logical_volumes.side_effect = (
lambda vg: self._lvs.get(vg.lstrip('vg_name='), [])
)
self.assertEqual(utils.get_lvs('/dev/sdb'),
self._lvs['testvg'])
_lvm.is_lvm_physical_volume.assert_called_with(
utils._partition_name('/dev/sdb')
)
_lvm.list_lvm_volume_group.assert_called_with(
utils._partition_name('/dev/sdb')
)
_lvm.list_logical_volumes.assert_called_with('vg_name=testvg')
@patch.object(utils, 'lvm')
def test_get_lvs_no_lvs(self, _lvm):
_lvm.is_lvm_physical_volume.return_value = True
_lvm.list_lvm_volume_group.return_value = 'missingvg'
_lvm.list_logical_volumes.side_effect = (
lambda vg: self._lvs.get(vg.lstrip('vg_name='), [])
)
self.assertEqual(utils.get_lvs('/dev/sdb'), [])
_lvm.is_lvm_physical_volume.assert_called_with(
utils._partition_name('/dev/sdb')
)
_lvm.list_lvm_volume_group.assert_called_with(
utils._partition_name('/dev/sdb')
)
_lvm.list_logical_volumes.assert_called_with('vg_name=missingvg')
@patch.object(utils, 'lvm')
def test_get_lvs_no_pv(self, _lvm):
_lvm.is_lvm_physical_volume.return_value = False
self.assertEqual(utils.get_lvs('/dev/sdb'), [])
_lvm.is_lvm_physical_volume.assert_called_with(
utils._partition_name('/dev/sdb')
)