fix unmounting of LXC containers

There were two issues here.

1. A single global mount object was stored for all instances,
so only the most recently mounted instance could ever be unmounted.

2. Even if there was only a single LXC instance in use,
the global object would be lost on restart of Nova.

Therefore we now rebuild the internal state of the mount object
by passing the mount point to destroy_container() and querying
/proc/mounts for the device in use at that mount point
(a usage sketch follows below).

Fixes bug: 971621
Change-Id: I5442442f00d93f5e8b82f492d62918419db5cd3b
Pádraig Brady 2012-08-01 14:26:54 +01:00
parent 751270fd46
commit a5184d5dbf
7 changed files with 146 additions and 26 deletions
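A usage-level sketch of the change (illustrative only, not part of the commit; it assumes a configured Nova environment and uses a hypothetical rootfs path):

# Illustrative sketch only -- not part of this commit.
from nova.virt.disk import api as disk_api

# Hypothetical container rootfs directory for one instance.
rootfs = '/var/lib/nova/instances/instance-00000001/rootfs'

# Before: the caller had to keep the object returned by setup_container()
# and hand it back later -- state that was global and lost on a Nova restart.
#
#     container = disk_api.setup_container('/path/to/disk', container_dir=rootfs)
#     disk_api.destroy_container(container)
#
# After: only the mount point is needed; the backing device is rediscovered
# from /proc/mounts at teardown, even in a fresh Nova process.
disk_api.setup_container('/path/to/disk', container_dir=rootfs)
# ... instance lifetime ...
disk_api.destroy_container(container_dir=rootfs)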

View File

@@ -15,9 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.virt.disk import api as disk_api
from nova.virt import driver
@@ -86,6 +89,16 @@ class TestVirtDriver(test.TestCase):
class TestVirtDisk(test.TestCase):
    def setUp(self):
        super(TestVirtDisk, self).setUp()
        self.executes = []

        def fake_execute(*cmd, **kwargs):
            self.executes.append(cmd)
            return None, None

        self.stubs.Set(utils, 'execute', fake_execute)

    def test_check_safe_path(self):
        ret = disk_api._join_and_check_path_within_fs('/foo', 'etc',
                                                      'something.conf')
@@ -101,3 +114,54 @@ class TestVirtDisk(test.TestCase):
                          disk_api._inject_file_into_fs,
                          '/tmp', '/etc/../../../../etc/passwd',
                          'hax')

    def test_lxc_destroy_container(self):

        def proc_mounts(self, mount_point):
            mount_points = {
                '/mnt/loop/nopart': '/dev/loop0',
                '/mnt/loop/part': '/dev/mapper/loop0p1',
                '/mnt/nbd/nopart': '/dev/nbd15',
                '/mnt/nbd/part': '/dev/mapper/nbd15p1',
                '/mnt/guestfs': 'guestmount',
            }
            return mount_points[mount_point]

        self.stubs.Set(os.path, 'exists', lambda _: True)
        self.stubs.Set(disk_api._DiskImage, '_device_for_path', proc_mounts)

        expected_commands = []

        disk_api.destroy_container('/mnt/loop/nopart')
        expected_commands += [
            ('umount', '/dev/loop0'),
            ('losetup', '--detach', '/dev/loop0'),
        ]

        disk_api.destroy_container('/mnt/loop/part')
        expected_commands += [
            ('umount', '/dev/mapper/loop0p1'),
            ('kpartx', '-d', '/dev/loop0'),
            ('losetup', '--detach', '/dev/loop0'),
        ]

        disk_api.destroy_container('/mnt/nbd/nopart')
        expected_commands += [
            ('umount', '/dev/nbd15'),
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]

        disk_api.destroy_container('/mnt/nbd/part')
        expected_commands += [
            ('umount', '/dev/mapper/nbd15p1'),
            ('kpartx', '-d', '/dev/nbd15'),
            ('qemu-nbd', '-d', '/dev/nbd15'),
        ]

        disk_api.destroy_container('/mnt/guestfs')
        expected_commands += [
            ('fusermount', '-u', '/mnt/guestfs'),
        ]

        # It's not worth trying to match the last timeout command
        self.executes.pop()

        self.assertEqual(self.executes, expected_commands)

View File

@@ -173,6 +173,8 @@ def unbind(target):
class _DiskImage(object):
    """Provide operations on a disk image file."""

    tmp_prefix = 'openstack-disk-mount-tmp'

    def __init__(self, image, partition=None, use_cow=False, mount_dir=None):
        # These passed to each mounter
        self.image = image
@@ -194,18 +196,50 @@ class _DiskImage(object):
            msg = _('no capable image handler configured')
            raise exception.NovaException(msg)

        if mount_dir:
            # Note the os.path.ismount() shortcut doesn't
            # work with libguestfs due to permissions issues.
            device = self._device_for_path(mount_dir)
            if device:
                self._reset(device)

    @staticmethod
    def _device_for_path(path):
        device = None
        with open("/proc/mounts", 'r') as ifp:
            for line in ifp:
                fields = line.split()
                if fields[1] == path:
                    device = fields[0]
                    break
        return device

    def _reset(self, device):
        """Reset internal state for a previously mounted directory."""
        mounter_cls = self._handler_class(device=device)
        mounter = mounter_cls(image=self.image,
                              partition=self.partition,
                              mount_dir=self.mount_dir,
                              device=device)
        self._mounter = mounter

        mount_name = os.path.basename(self.mount_dir or '')
        self._mkdir = mount_name.startswith(self.tmp_prefix)

    @property
    def errors(self):
        """Return the collated errors from all operations."""
        return '\n--\n'.join([''] + self._errors)

    @staticmethod
    def _handler_class(mode):
        """Look up the appropriate class to use based on MODE."""
    def _handler_class(mode=None, device=None):
        """Look up the appropriate class to use based on MODE or DEVICE."""
        for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
            if cls.mode == mode:
            if mode and cls.mode == mode:
                return cls
        msg = _("unknown disk image handler: %s") % mode
            elif device and cls.device_id_string in device:
                return cls
        msg = _("no disk image handler for: %s") % mode or device
        raise exception.NovaException(msg)

    def mount(self):
@@ -220,7 +254,7 @@ class _DiskImage(object):
            raise exception.NovaException(_('image already mounted'))

        if not self.mount_dir:
            self.mount_dir = tempfile.mkdtemp()
            self.mount_dir = tempfile.mkdtemp(prefix=self.tmp_prefix)
            self._mkdir = True

        try:
@@ -275,18 +309,14 @@ def inject_data(image,
        raise exception.NovaException(img.errors)


def setup_container(image, container_dir=None, use_cow=False):
def setup_container(image, container_dir, use_cow=False):
    """Setup the LXC container.

    It will mount the loopback image to the container directory in order
    to create the root filesystem for the container.

    LXC does not support qcow2 images yet.
    """
    img = _DiskImage(image=image, use_cow=use_cow, mount_dir=container_dir)
    if img.mount():
        return img
    else:
    if not img.mount():
        LOG.error(_("Failed to mount container filesystem '%(image)s' "
                    "on '%(target)s': %(errors)s") %
                  {"image": img, "target": container_dir,
@@ -294,17 +324,15 @@ def setup_container(image, container_dir=None, use_cow=False):
        raise exception.NovaException(img.errors)


def destroy_container(img):
def destroy_container(container_dir):
    """Destroy the container once it terminates.

    It will umount the container that is mounted,
    and delete any linked devices.

    LXC does not support qcow2 images yet.
    """
    try:
        if img:
            img.umount()
        img = _DiskImage(image=None, mount_dir=container_dir)
        img.umount()
    except Exception, exn:
        LOG.exception(_('Failed to unmount container filesystem: %s'), exn)
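For reference, a minimal standalone sketch of the /proc/mounts lookup that _device_for_path() performs above (illustrative, not part of the commit). Each /proc/mounts line reads "device mountpoint fstype options dump pass", so field 0 is the backing device and field 1 the mount point:

# Standalone sketch of the /proc/mounts lookup used by _device_for_path().
# Example /proc/mounts line:
#   /dev/mapper/nbd15p1 /mnt/nbd/part ext3 rw,relatime 0 0
def device_for_path(path):
    with open('/proc/mounts', 'r') as mounts:
        for line in mounts:
            fields = line.split()
            if fields[1] == path:
                return fields[0]    # device backing the mount point
    return None

print(device_for_path('/'))    # e.g. '/dev/sda1' on a typical Linux host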

View File

@@ -25,6 +25,7 @@ from nova.virt.disk import mount
class Mount(mount.Mount):
    """libguestfs support for arbitrary images."""
    mode = 'guestfs'
    device_id_string = 'guest'

    def map_dev(self):
        self.mapped = True

View File

@@ -22,6 +22,7 @@ from nova.virt.disk import mount
class Mount(mount.Mount):
    """loop back support for raw images."""
    mode = 'loop'
    device_id_string = mode

    def get_dev(self):
        out, err = utils.trycmd('losetup', '--find', '--show', self.image,

View File

@@ -30,7 +30,7 @@ class Mount(object):
    to be called in that order.
    """

    def __init__(self, image, mount_dir, partition=None):
    def __init__(self, image, mount_dir, partition=None, device=None):

        # Input
        self.image = image
@@ -42,7 +42,24 @@ class Mount(object):
        # Internal
        self.linked = self.mapped = self.mounted = False
        self.device = self.mapped_device = None
        self.device = self.mapped_device = device

        # Reset to mounted dir if possible
        self.reset_dev()

    def reset_dev(self):
        """Reset device paths to allow unmounting."""
        if not self.device:
            return

        self.linked = self.mapped = self.mounted = True

        device = self.device
        if os.path.isabs(device) and os.path.exists(device):
            if device.startswith('/dev/mapper/'):
                device = os.path.basename(device)
                device, self.partition = device.rsplit('p', 1)
                self.device = os.path.join('/dev', device)

    def get_dev(self):
        """Make the image available as a block device in the file system."""

View File

@@ -40,6 +40,7 @@ FLAGS.register_opts(nbd_opts)
class Mount(mount.Mount):
    """qemu-nbd support disk images."""
    mode = 'nbd'
    device_id_string = mode

    # NOTE(padraig): There are three issues with this nbd device handling
    # 1. max_nbd_devices should be inferred (#861504)
@@ -69,7 +70,11 @@ class Mount(mount.Mount):
        return device

    def _free_nbd(self, device):
        self._DEVICES.append(device)
        # The device could already be present if unget_dev
        # is called right after a nova restart
        # (when destroying an LXC container for example).
        if not device in self._DEVICES:
            self._DEVICES.append(device)

    def get_dev(self):
        device = self._allocate_nbd()
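With guestfs, loop and nbd all declaring a device_id_string ('guest', 'loop' and 'nbd' respectively), _handler_class() in the disk API shown earlier can select a mounter from a device path alone. A standalone sketch of that selection rule (illustrative; the handler names here are placeholders, not Nova classes):

# Standalone sketch of device-based handler selection.
HANDLER_IDS = (('loop', 'loop handler'),
               ('nbd', 'nbd handler'),
               ('guest', 'guestfs handler'))

def handler_for_device(device):
    for id_string, handler in HANDLER_IDS:
        if id_string in device:
            return handler
    raise ValueError('no disk image handler for: %s' % device)

print(handler_for_device('/dev/loop0'))             # loop handler
print(handler_for_device('/dev/mapper/nbd15p1'))    # nbd handler
print(handler_for_device('guestmount'))             # guestfs handler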

View File

@@ -271,7 +271,6 @@ class LibvirtDriver(driver.ComputeDriver):
        self._host_state = None
        self._initiator = None
        self._wrapped_conn = None
        self.container = None
        self.read_only = read_only
        if FLAGS.firewall_driver not in firewall.drivers:
            FLAGS.set_default('firewall_driver', firewall.drivers[0])
@@ -551,7 +550,10 @@ class LibvirtDriver(driver.ComputeDriver):
        LOG.info(_('Deleting instance files %(target)s') % locals(),
                 instance=instance)
        if FLAGS.libvirt_type == 'lxc':
            disk.destroy_container(self.container)
            container_dir = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'rootfs')
            disk.destroy_container(container_dir=container_dir)
        if os.path.exists(target):
            # If we fail to get rid of the directory
            # tree, this shouldn't block deletion of
@@ -1242,7 +1244,9 @@ class LibvirtDriver(driver.ComputeDriver):
        libvirt_utils.write_to_file(basepath('libvirt.xml'), libvirt_xml)

        if FLAGS.libvirt_type == 'lxc':
            container_dir = '%s/rootfs' % basepath(suffix='')
            container_dir = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         'rootfs')
            libvirt_utils.ensure_tree(container_dir)

        # NOTE(dprince): for rescue console.log may already exist... chown it.
@@ -1438,9 +1442,9 @@ class LibvirtDriver(driver.ComputeDriver):
                     instance=instance)

        if FLAGS.libvirt_type == 'lxc':
            self.container = disk.setup_container(basepath('disk'),
                                                  container_dir=container_dir,
                                                  use_cow=FLAGS.use_cow_images)
            disk.setup_container(basepath('disk'),
                                 container_dir=container_dir,
                                 use_cow=FLAGS.use_cow_images)

        if FLAGS.libvirt_type == 'uml':
            libvirt_utils.chown(basepath('disk'), 'root')
@@ -1569,7 +1573,7 @@ class LibvirtDriver(driver.ComputeDriver):
            fs.source_type = "mount"
            fs.source_dir = os.path.join(FLAGS.instances_path,
                                         instance['name'],
                                         "rootfs")
                                         'rootfs')
            devices.append(fs)
        else:
            if image_meta and image_meta.get('disk_format') == 'iso':
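The delete, image-setup and guest-config paths above now each derive the container rootfs independently from FLAGS.instances_path and the instance name, instead of sharing a stored _DiskImage object. A tiny standalone sketch with hypothetical values:

# Standalone sketch; the concrete values are hypothetical examples.
import os

instances_path = '/var/lib/nova/instances'    # stand-in for FLAGS.instances_path
instance_name = 'instance-00000001'           # stand-in for instance['name']

container_dir = os.path.join(instances_path, instance_name, 'rootfs')
print(container_dir)    # /var/lib/nova/instances/instance-00000001/rootfs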