Use library instead of CLI to cleanup RBD volumes

'rbd list' CLI returns error code when there are no rbd volumes, which
causes problems during live migration of VMs with RBD backed ephemeral
volumes. It's safer to use the library that only raises an exception in
case of a real problem.

The only case where rbd CLI is still justified is import, which is
needed to correctly import sparse image files.

All code related to cleanup of RBD volumes is moved to rbd.py, this
fixes a yo-yo problem with single-use methods scattered across 3
different files, and minimizes impact of this fix on imports in
imagebackend and utils.

Closes-Bug: #1346092
Change-Id: I92cd6b16fbd93b377fe47b15d22efbbf68d02513
Signed-off-by: Dmitry Borodaenko <dborodaenko@mirantis.com>
This commit is contained in:
Dmitry Borodaenko
2014-03-13 14:33:11 -07:00
committed by Michael Still
parent 8230b751fb
commit 13e2bd02a5
9 changed files with 82 additions and 153 deletions

View File

@@ -110,10 +110,6 @@ def create_lvm_image(vg, lv, size, sparse=False):
pass
# Test fake: no-op stand-in for the real libvirt_utils rbd import helper.
def import_rbd_image(path, *args):
pass
# Test fake: no-op; the real helper queries LVM for free space in a VG.
def volume_group_free_space(vg):
pass
@@ -194,17 +190,5 @@ def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
return "qemu"
def list_rbd_volumes(pool):
    """Test fake: return a canned list of RBD volume names.

    The first three names share the instance uuid prefix (disk, swap,
    root); the fourth deliberately does NOT start with the uuid so
    prefix-filtering logic can be exercised.  *pool* is ignored.
    """
    uuid = '875a8070-d0b9-4949-8b31-104d125c9a64'
    return [uuid + '.local', uuid + '.swap', uuid, 'wrong' + uuid]
# Test fake: no-op; the real helper removes the named volumes from the pool.
def remove_rbd_volumes(pool, *names):
pass
# Test fake: no-op; the real helper extracts the architecture from image
# metadata.
def get_arch(image_meta):
pass

View File

@@ -80,6 +80,7 @@ from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import rbd
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
@@ -6118,38 +6119,16 @@ class LibvirtConnTestCase(test.TestCase,
"uuid": "875a8070-d0b9-4949-8b31-104d125c9a64"}
conn.destroy(self.context, instance, [])
# NOTE(review): rendered diff with +/- markers stripped — the lines below
# fuse the REMOVED mox-based test_cleanup_rbd with the ADDED mock-based
# replacement.  The two versions are annotated separately.
# --- removed version (mox, stubs remove_rbd_volumes on libvirt_utils) ---
def test_cleanup_rbd(self):
mock = self.mox.CreateMock(libvirt.virDomain)
def fake_lookup_by_name(instance_name):
return mock
def fake_get_info(instance_name):
return {'state': power_state.SHUTDOWN, 'id': -1}
fake_volumes = ['875a8070-d0b9-4949-8b31-104d125c9a64.local',
                '875a8070-d0b9-4949-8b31-104d125c9a64.swap',
                '875a8070-d0b9-4949-8b31-104d125c9a64',
                'wrong875a8070-d0b9-4949-8b31-104d125c9a64']
fake_pool = 'fake_pool'
fake_instance = {'name': 'fakeinstancename', 'id': 'instanceid',
                 'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
self.stubs.Set(conn, '_lookup_by_name', fake_lookup_by_name)
self.stubs.Set(conn, 'get_info', fake_get_info)
self.flags(images_rbd_pool=fake_pool, group='libvirt')
self.mox.StubOutWithMock(libvirt_driver.libvirt_utils,
                         'remove_rbd_volumes')
libvirt_driver.libvirt_utils.remove_rbd_volumes(fake_pool,
                                                *fake_volumes[:3])
self.mox.ReplayAll()
# --- added version (mock.patch on rbd.RBDDriver; asserts the driver's
# cleanup_volumes is delegated to exactly once) ---
@mock.patch.object(rbd, 'RBDDriver')
def test_cleanup_rbd(self, mock_driver):
driver = mock_driver.return_value
driver.cleanup_volumes = mock.Mock()
fake_instance = {'uuid': '875a8070-d0b9-4949-8b31-104d125c9a64'}
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
conn._cleanup_rbd(fake_instance)
self.mox.VerifyAll()
driver.cleanup_volumes.assert_called_once_with(fake_instance)
def test_destroy_undefines_no_undefine_flags(self):
mock = self.mox.CreateMock(libvirt.virDomain)

View File

@@ -745,19 +745,23 @@ class RbdTestCase(_ImageTestCase, test.NoDBTestCase):
rbd.rbd.RBD_FEATURE_LAYERING = 1
fake_processutils.fake_execute_clear_log()
fake_processutils.stub_out_processutils_execute(self.stubs)
self.mox.StubOutWithMock(imagebackend.disk, 'get_disk_size')
imagebackend.disk.get_disk_size(self.TEMPLATE_PATH
).AndReturn(self.SIZE)
rbd_name = "%s/%s" % (self.INSTANCE['name'], self.NAME)
cmd = ('--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name = "%s_%s" % (self.INSTANCE['uuid'], self.NAME)
cmd = ('rbd', 'import', '--pool', self.POOL, self.TEMPLATE_PATH,
rbd_name, '--new-format', '--id', self.USER,
'--conf', self.CONF)
self.libvirt_utils.import_rbd_image(self.TEMPLATE_PATH, *cmd)
self.mox.ReplayAll()
image = self.image_class(self.INSTANCE, self.NAME)
image.create_image(fn, self.TEMPLATE_PATH, None)
self.assertEqual(fake_processutils.fake_execute_get_log(),
[' '.join(cmd)])
self.mox.VerifyAll()
def test_prealloc_image(self):

View File

@@ -167,3 +167,18 @@ class RbdTestCase(test.NoDBTestCase):
self.assertTrue(self.driver.exists(self.volume_name))
proxy.__enter__.assert_called_once_with()
proxy.__exit__.assert_called_once_with(None, None, None)
@mock.patch.object(rbd, 'rbd')
@mock.patch.object(rbd, 'rados')
@mock.patch.object(rbd, 'RADOSClient')
def test_cleanup_volumes(self, mock_client, mock_rados, mock_rbd):
# Verify cleanup_volumes removes only volumes whose names start with
# the instance uuid, via a RADOSClient context manager.
instance = {'uuid': '12345'}
# NOTE: local name deliberately shadows the mocked module alias; it is
# the mocked rbd.RBD() API object, not the rbd module.
rbd = mock_rbd.RBD.return_value
rbd.list.return_value = ['12345_test', '111_test']
client = mock_client.return_value
self.driver.cleanup_volumes(instance)
# '111_test' does not match the uuid prefix, so exactly one remove.
rbd.remove.assert_called_once_with(client.ioctx, '12345_test')
client.__enter__.assert_called_once_with()
client.__exit__.assert_called_once_with(None, None, None)

View File

@@ -47,46 +47,6 @@ blah BLAH: bb
disk_type = libvirt_utils.get_disk_type(path)
self.assertEqual(disk_type, 'raw')
# Removed by this commit: exercised the CLI-based list_rbd_volumes helper
# by stubbing utils.execute and expecting an 'rbd -p <pool> ls' invocation.
def test_list_rbd_volumes(self):
conf = '/etc/ceph/fake_ceph.conf'
pool = 'fake_pool'
user = 'user'
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self.flags(rbd_user=user, group='libvirt')
self.mox.StubOutWithMock(libvirt_utils.utils,
                         'execute')
libvirt_utils.utils.execute('rbd', '-p', pool, 'ls', '--id',
                            user,
                            '--conf', conf).AndReturn(("Out", "Error"))
self.mox.ReplayAll()
libvirt_utils.list_rbd_volumes(pool)
self.mox.VerifyAll()
# Removed by this commit: exercised the CLI-based remove_rbd_volumes helper,
# expecting one 'rbd rm <pool>/<name>' invocation per volume with 3 attempts.
def test_remove_rbd_volumes(self):
conf = '/etc/ceph/fake_ceph.conf'
pool = 'fake_pool'
user = 'user'
names = ['volume1', 'volume2', 'volume3']
self.flags(images_rbd_ceph_conf=conf, group='libvirt')
self.flags(rbd_user=user, group='libvirt')
self.mox.StubOutWithMock(libvirt_utils.utils, 'execute')
libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume1'),
                            '--id', user, '--conf', conf, attempts=3,
                            run_as_root=True)
libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume2'),
                            '--id', user, '--conf', conf, attempts=3,
                            run_as_root=True)
libvirt_utils.utils.execute('rbd', 'rm', os.path.join(pool, 'volume3'),
                            '--id', user, '--conf', conf, attempts=3,
                            run_as_root=True)
self.mox.ReplayAll()
libvirt_utils.remove_rbd_volumes(pool, *names)
self.mox.VerifyAll()
@mock.patch('nova.utils.execute')
def test_copy_image_local_cp(self, mock_execute):
libvirt_utils.copy_image('src', 'dest')

View File

@@ -92,6 +92,7 @@ from nova.virt.libvirt import firewall as libvirt_firewall
from nova.virt.libvirt import imagebackend
from nova.virt.libvirt import imagecache
from nova.virt.libvirt import lvm
from nova.virt.libvirt import rbd
from nova.virt.libvirt import utils as libvirt_utils
from nova.virt import netutils
from nova.virt import watchdog_actions
@@ -1099,17 +1100,11 @@ class LibvirtDriver(driver.ComputeDriver):
self._cleanup_rbd(instance)
def _cleanup_rbd(self, instance):
# NOTE(review): rendered diff with +/- markers stripped — the lines
# below fuse the REMOVED CLI-based body with the ADDED RBDDriver-based
# body of the same method.
# --- removed body (list volumes via CLI, filter by uuid prefix, rm) ---
pool = CONF.libvirt.images_rbd_pool
volumes = libvirt_utils.list_rbd_volumes(pool)
pattern = instance['uuid']
def belongs_to_instance(disk):
return disk.startswith(pattern)
volumes = filter(belongs_to_instance, volumes)
if volumes:
libvirt_utils.remove_rbd_volumes(pool, *volumes)
# --- added body (delegate cleanup to the librbd-backed driver) ---
driver = rbd.RBDDriver(
    pool=CONF.libvirt.images_rbd_pool,
    ceph_conf=CONF.libvirt.images_rbd_ceph_conf,
    rbd_user=CONF.libvirt.rbd_user)
driver.cleanup_volumes(instance)
def _cleanup_lvm(self, instance):
"""Delete all LVM disks for given instance object."""

View File

@@ -558,16 +558,9 @@ class Rbd(Image):
else:
self.verify_base_size(base, size)
# keep using the command line import instead of librbd since it
# detects zeroes to preserve sparseness in the image
args = ['--pool', self.pool, base, self.rbd_name]
if self.driver.supports_layering():
args += ['--new-format']
args += self.driver.ceph_args()
libvirt_utils.import_rbd_image(*args)
self.driver.import_image(base, self.rbd_name)
base_size = disk.get_disk_size(base)
if size and size > base_size:
self.driver.resize(self.rbd_name, size)

View File

@@ -23,6 +23,7 @@ except ImportError:
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova import utils
@@ -64,6 +65,19 @@ class RBDVolumeProxy(object):
return getattr(self.volume, attrib)
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
# Connect immediately; the driver owns the rados connection details.
# :param driver: RBDDriver instance providing _connect_to_rados /
#                _disconnect_from_rados
# :param pool: optional pool name passed through to the connect call
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
# Expose the client itself so callers can reach .ioctx.
return self
def __exit__(self, type_, value, traceback):
# Always disconnect, even when the body raised.
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
class RBDDriver(object):
def __init__(self, pool, ceph_conf, rbd_user):
@@ -145,3 +159,34 @@ class RBDDriver(object):
return True
except rbd.ImageNotFound:
return False
def import_image(self, base, name):
"""Import RBD volume from image file.

Uses the command line import instead of librbd since rbd import
command detects zeroes to preserve sparseness in the image.

:param base: Path to image file
:param name: Name of RBD volume
"""
args = ['--pool', self.pool, base, name]
# --new-format is only valid when the cluster supports layering.
if self.supports_layering():
args += ['--new-format']
args += self.ceph_args()
utils.execute('rbd', 'import', *args)
def cleanup_volumes(self, instance):
# Remove every RBD volume in self.pool whose name starts with the
# instance uuid.  Uses librbd (not the CLI) so an empty pool is not an
# error; removal failures are logged and skipped, never raised.
with RADOSClient(self, self.pool) as client:
def belongs_to_instance(disk):
return disk.startswith(instance['uuid'])
# pylint: disable=E1101
volumes = rbd.RBD().list(client.ioctx)
for volume in filter(belongs_to_instance, volumes):
try:
rbd.RBD().remove(client.ioctx, volume)
# Best-effort cleanup: a missing volume or one with snapshots
# should not abort removal of the remaining volumes.
except (rbd.ImageNotFound, rbd.ImageHasSnapshots):
LOG.warn(_LW('rbd remove %(volume)s in pool %(pool)s '
             'failed'),
         {'volume': volume, 'pool': self.pool})

View File

@@ -203,52 +203,6 @@ def create_cow_image(backing_file, path, size=None):
execute(*cmd)
# Thin wrapper over 'rbd import'; kept as CLI because the command detects
# zero blocks and preserves sparseness in the imported image.
def import_rbd_image(*args):
execute('rbd', 'import', *args)
# Removed by this commit: run an rbd CLI command, appending --id/--conf
# from CONF.libvirt when configured; kwargs pass through to utils.execute.
def _run_rbd(*args, **kwargs):
total = list(args)
if CONF.libvirt.rbd_user:
total.extend(['--id', str(CONF.libvirt.rbd_user)])
if CONF.libvirt.images_rbd_ceph_conf:
total.extend(['--conf', str(CONF.libvirt.images_rbd_ceph_conf)])
return utils.execute(*total, **kwargs)
# Removed by this commit in favor of librbd: 'rbd ls' exits non-zero on an
# empty pool, which this masked by returning [] on any execution error.
def list_rbd_volumes(pool):
"""List volumes names for given ceph pool.

:param pool: ceph pool name
:returns: list of volume name strings; [] when the command fails
"""
try:
out, err = _run_rbd('rbd', '-p', pool, 'ls')
except processutils.ProcessExecutionError:
# No problem when no volume in rbd pool
return []
return [line.strip() for line in out.splitlines()]
# Removed by this commit in favor of librbd-based RBDDriver.cleanup_volumes.
def remove_rbd_volumes(pool, *names):
"""Remove one or more rbd volume.

Failures are logged per-volume and do not abort the remaining removals.
"""
for name in names:
# NOTE(nic): the rbd command supports two methods for
# specifying a pool name: the "-p" flag, and using the volume
# name notation "pool_name/volume_name"
# The latter method supersedes the former, so to guard
# against slashes in the volume name confusing things, always
# use the path notation
rbd_remove = ('rbd', 'rm', os.path.join(pool, name))
try:
_run_rbd(*rbd_remove, attempts=3, run_as_root=True)
except processutils.ProcessExecutionError:
LOG.warn(_LW("rbd remove %(name)s in pool %(pool)s failed"),
         {'name': name, 'pool': pool})
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
"""Pick the libvirt primary backend driver name