Fix launching of guests where instances_path is on GlusterFS

The FUSE module does not (currently) support O_DIRECT on files.
This prevents QEMU from starting guests with 'cache=none' for
disks located on a GlusterFS filesystem. The same also applies
to a handful of other filesystems (notably tmpfs, or any other
FUSE filesystem).

This patch introduces a startup check in the Nova compute service
which tries to create a file $instances_path/.directio.test using
the O_DIRECT flag. If this succeeds, then cache=none will be used
for all disks; otherwise it falls back to cache=writethrough.
While the latter does not offer performance as consistent as
cache=none, it is still host-crash safe and preserves data
integrity across migration, provided the filesystem is cache
coherent (cluster filesystems like GlusterFS are; NFS, by
contrast, is not).

By doing the dynamic check for O_DIRECT, we ensure that if
future FUSE modules gain O_DIRECT support, Nova will automatically
do the right thing.
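
In isolation, the check amounts to roughly the following sketch
(the supports_direct_io name here is illustrative; the open flags
and the EINVAL handling mirror the _supports_direct_io helper added
in the diff below):

    import errno
    import os

    def supports_direct_io(dirpath):
        # Probe by creating a throwaway file with O_DIRECT; a
        # filesystem lacking O_DIRECT support fails with EINVAL
        testfile = os.path.join(dirpath, ".directio.test")
        try:
            fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            os.close(fd)
            return True
        except OSError as e:
            if e.errno == errno.EINVAL:
                return False
            raise
        finally:
            try:
                os.unlink(testfile)
            except OSError:
                pass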

* nova/tests/test_libvirt.py: Stub out os.open in
  _check_xml_and_disk_driver() to enable testing of both
  the O_DIRECT and non-O_DIRECT code paths (a standalone
  sketch of this stubbing idea follows the list)
* nova/tests/test_virt_drivers.py: Set instances_path to
  the current directory
* nova/virt/libvirt.xml.template: Replace hardcoded 'none'
  string with the '$cachemode' variable for all disks.
  Add missing 'cache' attribute for the config disk
* nova/virt/libvirt/connection.py: Check whether O_DIRECT
  is supported on the "FLAGS.instances_path" directory
  and use 'none' for cachemode if it is, 'writethrough'
  otherwise
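
A minimal standalone sketch of the stubbing idea used in the tests
(the make_os_open_stub factory is illustrative, not part of the
test suite): wrap the real os.open so that O_DIRECT is either
rejected with EINVAL, mimicking a FUSE filesystem, or silently
stripped so the probe can run on any filesystem:

    import errno
    import os

    def make_os_open_stub(directio_supported):
        real_open = os.open

        def os_open_stub(path, flags, *args, **kwargs):
            if flags & os.O_DIRECT:
                if not directio_supported:
                    # Behave like a FUSE filesystem: refuse O_DIRECT
                    raise OSError(errno.EINVAL, os.strerror(errno.EINVAL))
                # Strip the flag so the file can still be created
                flags &= ~os.O_DIRECT
            return real_open(path, flags, *args, **kwargs)
        return os_open_stub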

Bug: 959637
Change-Id: I60cbff1c3ad8299fe2aa37099390f9235f6724d0
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
commit 78f3e76d69
parent 5c5a5fbaa0
Author: Daniel P. Berrange <berrange@redhat.com>
Date:   2012-03-21 11:35:43 +00:00

4 changed files with 85 additions and 7 deletions

nova/tests/test_libvirt.py

@@ -15,6 +15,7 @@
 # under the License.
 
 import copy
+import errno
 import eventlet
 import mox
 import os
@@ -800,6 +801,19 @@ class LibvirtConnTestCase(test.TestCase):
                              (check(tree), expected_result, i))
 
     def _check_xml_and_disk_driver(self, image_meta):
+        os_open = os.open
+        directio_supported = True
+
+        def os_open_stub(path, flags, *args, **kwargs):
+            if flags & os.O_DIRECT:
+                if not directio_supported:
+                    raise OSError(errno.EINVAL,
+                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
+                flags &= ~os.O_DIRECT
+            return os_open(path, flags, *args, **kwargs)
+
+        self.stubs.Set(os, 'open', os_open_stub)
+
         user_context = context.RequestContext(self.user_id, self.project_id)
         instance_ref = db.instance_create(user_context, self.test_instance)
         network_info = _fake_network_info(self.stubs, 1)
@@ -812,6 +826,18 @@ class LibvirtConnTestCase(test.TestCase):
         for disk in disks:
             self.assertEqual(disk.get("cache"), "none")
 
+        directio_supported = False
+
+        # The O_DIRECT availability is cached on first use in
+        # LibvirtConnection, hence we re-create it here
+        xml = connection.LibvirtConnection(True).to_xml(instance_ref,
+                                                        network_info,
+                                                        image_meta)
+        tree = ElementTree.fromstring(xml)
+        disks = tree.findall('./devices/disk/driver')
+        for disk in disks:
+            self.assertEqual(disk.get("cache"), "writethrough")
+
     def _check_xml_and_disk_bus(self, image_meta, device_type, bus):
         user_context = context.RequestContext(self.user_id, self.project_id)
         instance_ref = db.instance_create(user_context, self.test_instance)

nova/tests/test_virt_drivers.py

@@ -451,6 +451,11 @@ class LibvirtConnTestCase(_VirtDriverTestCase):
         nova.virt.libvirt.connection.libvirt_utils = fake_libvirt_utils
         nova.virt.libvirt.firewall.libvirt = fakelibvirt
 
+        # So that _supports_direct_io does its test based on the
+        # current working directory, instead of the default
+        # instances_path which doesn't exist
+        FLAGS.instances_path = ''
+
         # Point _VirtDriverTestCase at the right module
         self.driver_module = nova.virt.libvirt.connection
         super(LibvirtConnTestCase, self).setUp()

nova/virt/libvirt.xml.template

@@ -66,40 +66,40 @@
 #else
 #if $getVar('rescue', False)
         <disk type='file'>
-            <driver type='${driver_type}' cache='none'/>
+            <driver type='${driver_type}' cache='${cachemode}'/>
             <source file='${basepath}/disk.rescue'/>
             <target dev='${disk_prefix}a' bus='${ephemeral_disk_bus}'/>
         </disk>
         <disk type='file'>
-            <driver type='${driver_type}' cache='none'/>
+            <driver type='${driver_type}' cache='${cachemode}'/>
             <source file='${basepath}/disk'/>
             <target dev='${disk_prefix}b' bus='${ephemeral_disk_bus}'/>
         </disk>
 #else
 #if not ($getVar('ebs_root', False))
         <disk type='file' device='${root_device_type}'>
-            <driver type='${driver_type}' cache='none'/>
+            <driver type='${driver_type}' cache='${cachemode}'/>
             <source file='${basepath}/disk'/>
             <target dev='${root_device}' bus='${root_disk_bus}'/>
         </disk>
 #end if
 #if $getVar('ephemeral_device', False)
         <disk type='file'>
-            <driver type='${driver_type}' cache='none'/>
+            <driver type='${driver_type}' cache='${cachemode}'/>
             <source file='${basepath}/disk.local'/>
             <target dev='${ephemeral_device}' bus='${ephemeral_disk_bus}'/>
         </disk>
 #end if
 #for $eph in $ephemerals
         <disk type='block'>
-            <driver type='${driver_type}' cache='none'/>
+            <driver type='${driver_type}' cache='${cachemode}'/>
             <source dev='${basepath}/${eph.device_path}'/>
             <target dev='${eph.device}' bus='${ephemeral_disk_bus}'/>
         </disk>
 #end for
 #if $getVar('swap_device', False)
         <disk type='file'>
-            <driver type='${driver_type}' cache='none'/>
+            <driver type='${driver_type}' cache='${cachemode}'/>
             <source file='${basepath}/disk.swap'/>
             <target dev='${swap_device}' bus='${ephemeral_disk_bus}'/>
         </disk>
@@ -110,7 +110,7 @@
 #end if
 #if $getVar('config_drive', False)
         <disk type='file'>
-            <driver type='raw' />
+            <driver type='raw' cache='${cachemode}'/>
             <source file='${basepath}/disk.config' />
             <target dev='${disk_prefix}z' bus='${ephemeral_disk_bus}' />
         </disk>

nova/virt/libvirt/connection.py

@@ -225,8 +225,24 @@ class LibvirtConnection(driver.ComputeDriver):
         self.default_second_device = self._disk_prefix + 'b'
         self.default_third_device = self._disk_prefix + 'c'
+        self._disk_cachemode = None
 
         self.image_cache_manager = imagecache.ImageCacheManager()
 
+    @property
+    def disk_cachemode(self):
+        if self._disk_cachemode is None:
+            # We prefer 'none' for consistent performance, host crash
+            # safety & migration correctness by avoiding the host page
+            # cache. Some filesystems (eg GlusterFS via FUSE) don't
+            # support O_DIRECT though. For those we fall back to
+            # 'writethrough', which gives host crash safety and is safe
+            # for migration provided the filesystem is cache coherent
+            # (cluster filesystems typically are; NFS is not).
+            self._disk_cachemode = "none"
+            if not self._supports_direct_io(FLAGS.instances_path):
+                self._disk_cachemode = "writethrough"
+        return self._disk_cachemode
+
     @property
     def host_state(self):
         if not self._host_state:
@@ -995,6 +1011,36 @@ class LibvirtConnection(driver.ComputeDriver):
         return {'host': host, 'port': port, 'internal_access_path': None}
 
+    @staticmethod
+    def _supports_direct_io(dirpath):
+        testfile = os.path.join(dirpath, ".directio.test")
+        hasDirectIO = True
+        try:
+            f = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
+            os.close(f)
+            LOG.debug(_("Path '%(path)s' supports direct I/O") %
+                      {'path': dirpath})
+        except OSError, e:
+            if e.errno == errno.EINVAL:
+                LOG.debug(_("Path '%(path)s' does not support direct I/O: "
+                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
+                hasDirectIO = False
+            else:
+                LOG.error(_("Error on '%(path)s' while checking direct I/O: "
+                            "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
+                raise e
+        except Exception, e:
+            LOG.error(_("Error on '%(path)s' while checking direct I/O: "
+                        "'%(ex)s'") % {'path': dirpath, 'ex': str(e)})
+            raise e
+        finally:
+            try:
+                os.unlink(testfile)
+            except OSError:
+                pass
+
+        return hasDirectIO
+
     @staticmethod
     def _cache_image(fn, target, fname, cow=False, size=None, *args, **kwargs):
         """Wrapper for a method that creates an image that caches the image.
@@ -1374,6 +1420,7 @@ class LibvirtConnection(driver.ComputeDriver):
         xml_info = {'type': FLAGS.libvirt_type,
                     'name': instance['name'],
                     'uuid': instance['uuid'],
+                    'cachemode': self.disk_cachemode,
                     'basepath': os.path.join(FLAGS.instances_path,
                                              instance['name']),
                     'memory_kb': inst_type['memory_mb'] * 1024,