Fix launching of guests where instances_path is on GlusterFS
The FUSE module does not (currently) support O_DIRECT on files. This prevents QEMU from starting guests with 'cache=none' for disks located on a GlusterFS filesystem. The same applies to a handful of other filesystems (notably tmpfs, or any other FUSE filesystem).

This patch introduces a startup check in the Nova compute service which tries to create a file $instances_path/.direct_io.test using the O_DIRECT flag. If this succeeds, cache=none is used for all disks; otherwise it falls back to cache=writethrough. While the latter does not offer performance as consistent as cache=none, it is still host-crash safe and preserves data integrity across migration, provided the filesystem is cache coherent (cluster filesystems like GlusterFS are; NFS, by contrast, is not). By probing for O_DIRECT dynamically, we ensure that if future FUSE modules gain O_DIRECT support, Nova will automatically do the right thing.

* nova/tests/test_libvirt.py: Stub out os.open in _check_xml_and_disk_driver() to enable testing of both the O_DIRECT and non-O_DIRECT code paths
* nova/tests/test_virt_drivers.py: Set instances_path to the current directory
* nova/virt/libvirt.xml.template: Replace the hardcoded 'none' string with the '$cachemode' variable for all disks. Add the missing 'cache' attribute for the config disk
* nova/virt/libvirt/connection.py: Check whether O_DIRECT is supported on the FLAGS.instances_path directory and use 'none' for cachemode if it is, 'writethrough' otherwise

Bug: 959637
Change-Id: I60cbff1c3ad8299fe2aa37099390f9235f6724d0
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
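For illustration, here is a minimal sketch of the kind of startup probe described above. It is not the code this patch adds to nova/virt/libvirt/connection.py; the helper name _supports_direct_io, the probe file name .direct_io.test, the cache mode values and FLAGS.instances_path come from the commit, while everything else (argument names, flag combination, error-handling details) is an assumption:

    import errno
    import os

    def _supports_direct_io(dirpath):
        # Try to create a scratch file with O_DIRECT (a Linux-only flag).
        # A filesystem that cannot honour direct I/O, such as a FUSE mount
        # like GlusterFS, fails the open() with EINVAL.
        testfile = os.path.join(dirpath, ".direct_io.test")
        try:
            fd = os.open(testfile, os.O_CREAT | os.O_WRONLY | os.O_DIRECT)
            os.close(fd)
            return True
        except OSError as e:
            if e.errno == errno.EINVAL:
                return False
            raise
        finally:
            try:
                os.unlink(testfile)
            except OSError:
                pass

    # Pick the QEMU disk cache mode once at startup; the real code probes
    # FLAGS.instances_path rather than a hard-coded directory.
    if _supports_direct_io("/var/lib/nova/instances"):
        cachemode = "none"
    else:
        cachemode = "writethrough"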
nova/tests/test_libvirt.py

@@ -15,6 +15,7 @@
 # under the License.
 
 import copy
+import errno
 import eventlet
 import mox
 import os
@@ -800,6 +801,19 @@ class LibvirtConnTestCase(test.TestCase):
                              (check(tree), expected_result, i))
 
     def _check_xml_and_disk_driver(self, image_meta):
+        os_open = os.open
+        directio_supported = True
+
+        def os_open_stub(path, flags, *args, **kwargs):
+            if flags & os.O_DIRECT:
+                if not directio_supported:
+                    raise OSError(errno.EINVAL,
+                                  '%s: %s' % (os.strerror(errno.EINVAL), path))
+                flags &= ~os.O_DIRECT
+            return os_open(path, flags, *args, **kwargs)
+
+        self.stubs.Set(os, 'open', os_open_stub)
+
         user_context = context.RequestContext(self.user_id, self.project_id)
         instance_ref = db.instance_create(user_context, self.test_instance)
         network_info = _fake_network_info(self.stubs, 1)
@@ -812,6 +826,18 @@ class LibvirtConnTestCase(test.TestCase):
         for disk in disks:
             self.assertEqual(disk.get("cache"), "none")
 
+        directio_supported = False
+
+        # The O_DIRECT availability is cached on first use in
+        # LibvirtConnection, hence we re-create it here
+        xml = connection.LibvirtConnection(True).to_xml(instance_ref,
+                                                        network_info,
+                                                        image_meta)
+        tree = ElementTree.fromstring(xml)
+        disks = tree.findall('./devices/disk/driver')
+        for disk in disks:
+            self.assertEqual(disk.get("cache"), "writethrough")
+
     def _check_xml_and_disk_bus(self, image_meta, device_type, bus):
         user_context = context.RequestContext(self.user_id, self.project_id)
         instance_ref = db.instance_create(user_context, self.test_instance)
nova/tests/test_virt_drivers.py

@@ -451,6 +451,11 @@ class LibvirtConnTestCase(_VirtDriverTestCase):
         nova.virt.libvirt.connection.libvirt_utils = fake_libvirt_utils
         nova.virt.libvirt.firewall.libvirt = fakelibvirt
 
+        # So that the _supports_direct_io does the test based
+        # on the current working directory, instead of the
+        # default instances_path which doesn't exist
+        FLAGS.instances_path = ''
+
         # Point _VirtDriverTestCase at the right module
         self.driver_module = nova.virt.libvirt.connection
         super(LibvirtConnTestCase, self).setUp()
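The comment in the test diff above notes that the O_DIRECT probe result is cached on first use in LibvirtConnection, which is why the test re-creates the connection object before checking the writethrough path. Below is a hedged sketch of what such caching could look like, reusing the _supports_direct_io sketch from earlier; the attribute and property names are illustrative rather than the actual ones in connection.py:

    class LibvirtConnection(object):
        def __init__(self, read_only):
            self.read_only = read_only
            self._disk_cachemode = None   # probe result, filled in lazily

        @property
        def disk_cachemode(self):
            if self._disk_cachemode is None:
                # 'none' requires O_DIRECT; fall back to the host-crash-safe
                # 'writethrough' mode when the instances directory (e.g. a
                # GlusterFS mount) rejects direct I/O. The real code consults
                # FLAGS.instances_path rather than a hard-coded path.
                if _supports_direct_io("/var/lib/nova/instances"):
                    self._disk_cachemode = "none"
                else:
                    self._disk_cachemode = "writethrough"
            return self._disk_cachemode

Because the result lives on the connection object, constructing a fresh LibvirtConnection(True) in the test discards the cached value and lets both code paths be exercised.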