Refactor code for setting up libvirt disk mappings
Currently the libvirt disk bus is fixed at the time the driver is started. The get_guest_storage_config and _create_image methods therefore use variables initialized in the libvirt driver constructor to determine disk bus / dev name mappings. It will shortly become possible to configure a different disk bus per instance, which invalidates the current assumptions in the code. A further complication is that _create_image and get_guest_storage_config need to duplicate each other's logic for determining the disk mapping.

To simplify the code and make it more flexible, introduce a new 'blockinfo.py' module in the libvirt driver, along with a 'get_disk_mapping' method and associated helper APIs. This method examines the instance type and block device info dicts and works out the complete list of disks that will be attached to the guest, together with their optimal disk bus and dev name values. The result is returned as a dict and passed to _create_image and get_guest_storage_config. The logic for determining disk dev names is thus isolated in a single place, separate from the main driver code, and no longer needs to rely on state in the driver object.

Many test cases are added to thoroughly validate the disk mapping code, since there is a huge set of possible configurations the user may request, making it easy to break the code accidentally.

Blueprint: libvirt-custom-hardware
Change-Id: I645e69fcc7088674f063f619b2acbbee94d7ba61
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
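For orientation, the following is a rough sketch of the new call pattern that the test updates below exercise. It is assembled from the diff, not a standalone runnable example: CONF, conn, instance_ref, network_info and block_device_info stand in for the objects the tests construct.

    from nova.virt.libvirt import blockinfo

    # The driver now asks blockinfo for the disk layout up front...
    disk_info = blockinfo.get_disk_info(CONF.libvirt_type,      # e.g. "kvm"
                                        instance_ref,
                                        block_device_info)

    # ...and passes it into guest config / XML generation instead of relying
    # on bus/dev state stored on the driver object at startup.
    cfg = conn.get_guest_config(instance_ref, network_info, None, disk_info,
                                None, block_device_info)
    xml = conn.to_xml(instance_ref, network_info, disk_info)

    # The lower-level helper returns one entry per disk, keyed by role, with
    # the chosen bus, device name and device type (expected values taken from
    # the "simple" mapping test below):
    mapping = blockinfo.get_disk_mapping("kvm", instance_ref, "virtio", "ide")
    # mapping == {
    #     'disk':       {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
    #     'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
    #     'root':       {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
    # }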
@@ -55,6 +55,7 @@ from nova.virt import driver
from nova.virt import fake
from nova.virt import firewall as base_firewall
from nova.virt import images
from nova.virt.libvirt import blockinfo
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import driver as libvirt_driver
from nova.virt.libvirt import firewall
@@ -356,9 +357,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(cfg.acpi, True)
|
||||
self.assertEquals(cfg.apic, True)
|
||||
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
|
||||
@@ -401,9 +404,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 2),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(cfg.acpi, True)
|
||||
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
|
||||
self.assertEquals(cfg.vcpus, 1)
|
||||
@@ -433,14 +438,18 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None,
|
||||
{'root_device_name': 'dev/vdb'})
|
||||
block_device_info = {'root_device_name': '/dev/vdb'}
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref,
|
||||
block_device_info)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
|
||||
None, block_device_info)
|
||||
self.assertEquals(cfg.acpi, False)
|
||||
self.assertEquals(cfg.memory, 1024 * 1024 * 2)
|
||||
self.assertEquals(cfg.vcpus, 1)
|
||||
self.assertEquals(cfg.os_type, "uml")
|
||||
self.assertEquals(cfg.os_boot_dev, None)
|
||||
self.assertEquals(cfg.os_root, 'dev/vdb')
|
||||
self.assertEquals(cfg.os_root, '/dev/vdb')
|
||||
self.assertEquals(len(cfg.devices), 3)
|
||||
self.assertEquals(type(cfg.devices[0]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
@@ -458,7 +467,10 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
{'connection_info': conn_info, 'mount_device': '/dev/vdc'},
|
||||
{'connection_info': conn_info, 'mount_device': '/dev/vdd'}]}
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None, info)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref, info)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info,
|
||||
None, info)
|
||||
self.assertEquals(type(cfg.devices[2]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
self.assertEquals(cfg.devices[2].target_dev, 'vdc')
|
||||
@@ -473,12 +485,13 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
# make configdrive.enabled_for() return True
|
||||
instance_ref['config_drive'] = 'ANY_ID'
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
|
||||
|
||||
self.assertEquals(type(cfg.devices[2]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
self.assertEquals(cfg.devices[2].target_dev,
|
||||
conn.default_last_device)
|
||||
self.assertEquals(cfg.devices[2].target_dev, 'vdz')
|
||||
|
||||
def test_get_guest_config_with_vnc(self):
|
||||
self.flags(libvirt_type='kvm',
|
||||
@@ -489,7 +502,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
|
||||
self.assertEquals(len(cfg.devices), 5)
|
||||
self.assertEquals(type(cfg.devices[0]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
@@ -513,7 +528,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
|
||||
self.assertEquals(len(cfg.devices), 6)
|
||||
self.assertEquals(type(cfg.devices[0]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
@@ -542,7 +559,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
|
||||
self.assertEquals(len(cfg.devices), 6)
|
||||
self.assertEquals(type(cfg.devices[0]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
@@ -571,7 +590,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
|
||||
self.assertEquals(len(cfg.devices), 6)
|
||||
self.assertEquals(type(cfg.devices[0]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
@@ -600,7 +621,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, None)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
cfg = conn.get_guest_config(instance_ref, [], None, disk_info)
|
||||
self.assertEquals(len(cfg.devices), 8)
|
||||
self.assertEquals(type(cfg.devices[0]),
|
||||
vconfig.LibvirtConfigGuestDisk)
|
||||
@@ -629,9 +652,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, disk_info)
|
||||
self.assertEquals(conf.cpu, None)
|
||||
|
||||
def test_get_guest_cpu_config_default_kvm(self):
|
||||
@@ -647,9 +672,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(type(conf.cpu),
|
||||
vconfig.LibvirtConfigGuestCPU)
|
||||
self.assertEquals(conf.cpu.mode, "host-model")
|
||||
@@ -662,9 +689,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, disk_info)
|
||||
self.assertEquals(conf.cpu, None)
|
||||
|
||||
def test_get_guest_cpu_config_default_lxc(self):
|
||||
@@ -674,9 +703,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, disk_info)
|
||||
self.assertEquals(conf.cpu, None)
|
||||
|
||||
def test_get_guest_cpu_config_host_passthrough_new(self):
|
||||
@@ -690,9 +721,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
self.flags(libvirt_cpu_mode="host-passthrough")
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(type(conf.cpu),
|
||||
vconfig.LibvirtConfigGuestCPU)
|
||||
self.assertEquals(conf.cpu.mode, "host-passthrough")
|
||||
@@ -709,9 +742,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
self.flags(libvirt_cpu_mode="host-model")
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(type(conf.cpu),
|
||||
vconfig.LibvirtConfigGuestCPU)
|
||||
self.assertEquals(conf.cpu.mode, "host-model")
|
||||
@@ -729,9 +764,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
self.flags(libvirt_cpu_mode="custom")
|
||||
self.flags(libvirt_cpu_model="Penryn")
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(type(conf.cpu),
|
||||
vconfig.LibvirtConfigGuestCPU)
|
||||
self.assertEquals(conf.cpu.mode, "custom")
|
||||
@@ -747,11 +784,14 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
self.flags(libvirt_cpu_mode="host-passthrough")
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
self.assertRaises(exception.NovaException,
|
||||
conn.get_guest_config,
|
||||
instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None,
|
||||
disk_info)
|
||||
|
||||
def test_get_guest_cpu_config_host_model_old(self):
|
||||
def get_lib_version_stub(self):
|
||||
@@ -781,9 +821,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(self.context, self.test_instance)
|
||||
|
||||
self.flags(libvirt_cpu_mode="host-model")
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(type(conf.cpu),
|
||||
vconfig.LibvirtConfigGuestCPU)
|
||||
self.assertEquals(conf.cpu.mode, None)
|
||||
@@ -805,9 +847,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
self.flags(libvirt_cpu_mode="custom")
|
||||
self.flags(libvirt_cpu_model="Penryn")
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
conf = conn.get_guest_config(instance_ref,
|
||||
_fake_network_info(self.stubs, 1),
|
||||
None, None)
|
||||
None, disk_info)
|
||||
self.assertEquals(type(conf.cpu),
|
||||
vconfig.LibvirtConfigGuestCPU)
|
||||
self.assertEquals(conf.cpu.mode, None)
|
||||
@@ -1572,14 +1616,16 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn.attach_volume,
|
||||
{"driver_volume_type": "badtype"},
|
||||
{"name": "fake-instance"},
|
||||
"/dev/fake")
|
||||
"/dev/sda")
|
||||
|
||||
def test_multi_nic(self):
|
||||
instance_data = dict(self.test_instance)
|
||||
network_info = _fake_network_info(self.stubs, 2)
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
instance_ref = db.instance_create(self.context, instance_data)
|
||||
xml = conn.to_xml(instance_ref, network_info, None, False)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
xml = conn.to_xml(instance_ref, network_info, disk_info)
|
||||
tree = etree.fromstring(xml)
|
||||
interfaces = tree.findall("./devices/interface")
|
||||
self.assertEquals(len(interfaces), 2)
|
||||
@@ -1599,7 +1645,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.assertEquals(conn.uri, 'lxc:///')
|
||||
|
||||
network_info = _fake_network_info(self.stubs, 1)
|
||||
xml = conn.to_xml(instance_ref, network_info)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
xml = conn.to_xml(instance_ref, network_info, disk_info)
|
||||
tree = etree.fromstring(xml)
|
||||
|
||||
check = [
|
||||
@@ -1640,7 +1688,9 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
|
||||
network_info = _fake_network_info(self.stubs, 1)
|
||||
xml = conn.to_xml(instance_ref, network_info)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
xml = conn.to_xml(instance_ref, network_info, disk_info)
|
||||
tree = etree.fromstring(xml)
|
||||
|
||||
for i, (check, expected_result) in enumerate(checks):
|
||||
@@ -1673,8 +1723,10 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
network_info = _fake_network_info(self.stubs, 1)
|
||||
|
||||
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
|
||||
instance_ref, network_info, image_meta)
|
||||
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
|
||||
tree = etree.fromstring(xml)
|
||||
disks = tree.findall('./devices/disk/driver')
|
||||
for disk in disks:
|
||||
@@ -1684,8 +1736,10 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
|
||||
# The O_DIRECT availability is cached on first use in
|
||||
# LibvirtDriver, hence we re-create it here
|
||||
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
|
||||
instance_ref, network_info, image_meta)
|
||||
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
|
||||
tree = etree.fromstring(xml)
|
||||
disks = tree.findall('./devices/disk/driver')
|
||||
for disk in disks:
|
||||
@@ -1697,11 +1751,13 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
network_info = _fake_network_info(self.stubs, 1)
|
||||
|
||||
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
|
||||
instance_ref,
|
||||
network_info,
|
||||
image_meta,
|
||||
block_device_info=block_device_info)
|
||||
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref,
|
||||
block_device_info,
|
||||
image_meta)
|
||||
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta,
|
||||
block_device_info=block_device_info)
|
||||
tree = etree.fromstring(xml)
|
||||
|
||||
got_disks = tree.findall('./devices/disk')
|
||||
@@ -1724,8 +1780,10 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
network_info = _fake_network_info(self.stubs, 1)
|
||||
|
||||
xml = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True).to_xml(
|
||||
instance_ref, network_info, image_meta)
|
||||
drv = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), True)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref)
|
||||
xml = drv.to_xml(instance_ref, network_info, disk_info, image_meta)
|
||||
tree = etree.fromstring(xml)
|
||||
self.assertEqual(tree.find('./uuid').text,
|
||||
instance_ref['uuid'])
|
||||
@@ -1879,7 +1937,11 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
self.assertEquals(conn.uri, expected_uri)
|
||||
|
||||
network_info = _fake_network_info(self.stubs, 1)
|
||||
xml = conn.to_xml(instance_ref, network_info, None, rescue)
|
||||
disk_info = blockinfo.get_disk_info(CONF.libvirt_type,
|
||||
instance_ref,
|
||||
rescue=rescue)
|
||||
xml = conn.to_xml(instance_ref, network_info, disk_info,
|
||||
rescue=rescue)
|
||||
tree = etree.fromstring(xml)
|
||||
for i, (check, expected_result) in enumerate(checks):
|
||||
self.assertEqual(check(tree),
|
||||
@@ -2216,9 +2278,14 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
).AndReturn(vol['block_device_mapping'])
|
||||
self.mox.StubOutWithMock(conn, "volume_driver_method")
|
||||
for v in vol['block_device_mapping']:
|
||||
disk_info = {
|
||||
'bus': "scsi",
|
||||
'dev': v['mount_device'].rpartition("/")[2],
|
||||
'type': "disk"
|
||||
}
|
||||
conn.volume_driver_method('connect_volume',
|
||||
v['connection_info'],
|
||||
v['mount_device'].rpartition("/")[2])
|
||||
v['connection_info'],
|
||||
disk_info)
|
||||
self.mox.StubOutWithMock(conn, 'plug_vifs')
|
||||
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
|
||||
|
||||
@@ -2244,10 +2311,14 @@ class LibvirtConnTestCase(test.TestCase):
|
||||
# Creating mocks
|
||||
self.mox.StubOutWithMock(conn, "volume_driver_method")
|
||||
for v in vol['block_device_mapping']:
|
||||
disk_info = {
|
||||
'bus': "scsi",
|
||||
'dev': v['mount_device'].rpartition("/")[2],
|
||||
'type': "disk"
|
||||
}
|
||||
conn.volume_driver_method('connect_volume',
|
||||
v['connection_info'],
|
||||
v['mount_device'].
|
||||
rpartition("/")[2])
|
||||
disk_info)
|
||||
self.mox.StubOutWithMock(conn, 'plug_vifs')
|
||||
conn.plug_vifs(mox.IsA(inst_ref), nw_info)
|
||||
self.mox.ReplayAll()
|
||||
@@ -4254,16 +4325,18 @@ class LibvirtDriverTestCase(test.TestCase):
|
||||
def fake_extend(path, size):
|
||||
pass
|
||||
|
||||
def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
|
||||
def fake_to_xml(instance, network_info, disk_info,
|
||||
image_meta=None, rescue=None,
|
||||
block_device_info=None, write_to_disk=False):
|
||||
return ""
|
||||
|
||||
def fake_plug_vifs(instance, network_info):
|
||||
pass
|
||||
|
||||
def fake_create_image(context, inst, libvirt_xml, suffix='',
|
||||
disk_images=None, network_info=None,
|
||||
block_device_info=None):
|
||||
def fake_create_image(context, inst, libvirt_xml,
|
||||
disk_mapping, suffix='',
|
||||
disk_images=None, network_info=None,
|
||||
block_device_info=None):
|
||||
pass
|
||||
|
||||
def fake_create_domain(xml, instance=None):
|
||||
@@ -4321,7 +4394,8 @@ class LibvirtDriverTestCase(test.TestCase):
|
||||
def fake_get_info(instance):
|
||||
return {'state': power_state.RUNNING}
|
||||
|
||||
def fake_to_xml(instance, network_info, image_meta=None, rescue=None,
|
||||
def fake_to_xml(instance, network_info, disk_info,
|
||||
image_meta=None, rescue=None,
|
||||
block_device_info=None):
|
||||
return ""
|
||||
|
||||
|
||||
nova/tests/test_libvirt_blockinfo.py (new file, 396 lines added)
@@ -0,0 +1,396 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
#
|
||||
# Copyright 2010 OpenStack LLC
|
||||
# Copyright 2012 University Of Minho
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from nova import block_device
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import test
|
||||
import nova.tests.image.fake
|
||||
from nova.virt.libvirt import blockinfo
|
||||
|
||||
|
||||
class LibvirtBlockInfoTest(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(LibvirtBlockInfoTest, self).setUp()
|
||||
|
||||
self.user_id = 'fake'
|
||||
self.project_id = 'fake'
|
||||
self.context = context.get_admin_context()
|
||||
nova.tests.image.fake.stub_out_image_service(self.stubs)
|
||||
self.test_instance = {
|
||||
'uuid': '32dfcb37-5af1-552b-357c-be8c3aa38310',
|
||||
'memory_kb': '1024000',
|
||||
'basepath': '/some/path',
|
||||
'bridge_name': 'br100',
|
||||
'vcpus': 2,
|
||||
'project_id': 'fake',
|
||||
'bridge': 'br101',
|
||||
'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
|
||||
'root_gb': 10,
|
||||
'ephemeral_gb': 20,
|
||||
'instance_type_id': '5'} # m1.small
|
||||
|
||||
def test_volume_in_mapping(self):
|
||||
swap = {'device_name': '/dev/sdb',
|
||||
'swap_size': 1}
|
||||
ephemerals = [{'num': 0,
|
||||
'virtual_name': 'ephemeral0',
|
||||
'device_name': '/dev/sdc1',
|
||||
'size': 1},
|
||||
{'num': 2,
|
||||
'virtual_name': 'ephemeral2',
|
||||
'device_name': '/dev/sdd',
|
||||
'size': 1}]
|
||||
block_device_mapping = [{'mount_device': '/dev/sde',
|
||||
'device_path': 'fake_device'},
|
||||
{'mount_device': '/dev/sdf',
|
||||
'device_path': 'fake_device'}]
|
||||
block_device_info = {
|
||||
'root_device_name': '/dev/sda',
|
||||
'swap': swap,
|
||||
'ephemerals': ephemerals,
|
||||
'block_device_mapping': block_device_mapping}
|
||||
|
||||
def _assert_volume_in_mapping(device_name, true_or_false):
|
||||
self.assertEquals(
|
||||
block_device.volume_in_mapping(device_name,
|
||||
block_device_info),
|
||||
true_or_false)
|
||||
|
||||
_assert_volume_in_mapping('sda', False)
|
||||
_assert_volume_in_mapping('sdb', True)
|
||||
_assert_volume_in_mapping('sdc1', True)
|
||||
_assert_volume_in_mapping('sdd', True)
|
||||
_assert_volume_in_mapping('sde', True)
|
||||
_assert_volume_in_mapping('sdf', True)
|
||||
_assert_volume_in_mapping('sdg', False)
|
||||
_assert_volume_in_mapping('sdh1', False)
|
||||
|
||||
def test_find_disk_dev(self):
|
||||
mapping = {
|
||||
"disk.local": {
|
||||
'dev': 'sda',
|
||||
'bus': 'scsi',
|
||||
'type': 'disk',
|
||||
},
|
||||
"disk.swap": {
|
||||
'dev': 'sdc',
|
||||
'bus': 'scsi',
|
||||
'type': 'disk',
|
||||
},
|
||||
}
|
||||
|
||||
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi')
|
||||
self.assertEqual(dev, 'sdb')
|
||||
|
||||
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'scsi',
|
||||
last_device=True)
|
||||
self.assertEqual(dev, 'sdz')
|
||||
|
||||
dev = blockinfo.find_disk_dev_for_disk_bus(mapping, 'virtio')
|
||||
self.assertEqual(dev, 'vda')
|
||||
|
||||
def test_get_next_disk_dev(self):
|
||||
mapping = {}
|
||||
mapping['disk.local'] = blockinfo.get_next_disk_info(mapping,
|
||||
'virtio')
|
||||
self.assertEqual(mapping['disk.local'],
|
||||
{'dev': 'vda', 'bus': 'virtio', 'type': 'disk'})
|
||||
|
||||
mapping['disk.swap'] = blockinfo.get_next_disk_info(mapping,
|
||||
'virtio')
|
||||
self.assertEqual(mapping['disk.swap'],
|
||||
{'dev': 'vdb', 'bus': 'virtio', 'type': 'disk'})
|
||||
|
||||
mapping['disk.config'] = blockinfo.get_next_disk_info(mapping,
|
||||
'ide',
|
||||
'cdrom',
|
||||
True)
|
||||
self.assertEqual(mapping['disk.config'],
|
||||
{'dev': 'hdd', 'bus': 'ide', 'type': 'cdrom'})
|
||||
|
||||
def test_get_disk_mapping_simple(self):
|
||||
# The simplest possible disk mapping setup, all defaults
|
||||
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide")
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_simple_rootdev(self):
|
||||
# A simple disk mapping setup, but with custom root device name
|
||||
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
block_device_info = {
|
||||
'root_device_name': '/dev/sda'
|
||||
}
|
||||
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'},
|
||||
'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'root': {'bus': 'scsi', 'dev': 'sda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_rescue(self):
|
||||
# A simple disk mapping setup, but in rescue mode
|
||||
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
rescue=True)
|
||||
|
||||
expect = {
|
||||
'disk.rescue': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_simple_iso(self):
|
||||
# A simple disk mapping setup, but with a ISO for root device
|
||||
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
image_meta = {'disk_format': 'iso'}
|
||||
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
None,
|
||||
image_meta)
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'},
|
||||
'disk.local': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'root': {'bus': 'ide', 'dev': 'hda', 'type': 'cdrom'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_simple_swap(self):
|
||||
# A simple disk mapping setup, but with a swap device added
|
||||
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
instance_ref['instance_type']['swap'] = 5
|
||||
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide")
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'disk.swap': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_simple_configdrive(self):
|
||||
# A simple disk mapping setup, but with configdrive added
|
||||
|
||||
self.flags(force_config_drive=True)
|
||||
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide")
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'disk.config': {'bus': 'virtio', 'dev': 'vdz', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_ephemeral(self):
|
||||
# A disk mapping with ephemeral devices
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
instance_ref['instance_type']['swap'] = 5
|
||||
|
||||
block_device_info = {
|
||||
'ephemerals': [
|
||||
{'num': 0, 'virtual_name': 'ephemeral0',
|
||||
'device_name': '/dev/vdb', 'size': 10},
|
||||
{'num': 1, 'virtual_name': 'ephemeral1',
|
||||
'device_name': '/dev/vdc', 'size': 10},
|
||||
{'num': 2, 'virtual_name': 'ephemeral2',
|
||||
'device_name': '/dev/vdd', 'size': 10},
|
||||
]
|
||||
}
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'disk.eph1': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
|
||||
'disk.eph2': {'bus': 'virtio', 'dev': 'vdd', 'type': 'disk'},
|
||||
'disk.swap': {'bus': 'virtio', 'dev': 'vde', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_custom_swap(self):
|
||||
# A disk mapping with a swap device at position vdb. This
|
||||
# should cause disk.local to be removed
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
block_device_info = {
|
||||
'swap': {'device_name': '/dev/vdb',
|
||||
'swap_size': 10},
|
||||
}
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.swap': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_blockdev_root(self):
|
||||
# A disk mapping with a blockdev replacing the default root
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
block_device_info = {
|
||||
'block_device_mapping': [
|
||||
{'connection_info': "fake",
|
||||
'mount_device': "/dev/vda",
|
||||
'delete_on_termination': True},
|
||||
]
|
||||
}
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.local': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_blockdev_eph(self):
|
||||
# A disk mapping with a blockdev replacing the ephemeral device
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
block_device_info = {
|
||||
'block_device_mapping': [
|
||||
{'connection_info': "fake",
|
||||
'mount_device': "/dev/vdb",
|
||||
'delete_on_termination': True},
|
||||
]
|
||||
}
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_blockdev_many(self):
|
||||
# A disk mapping with a blockdev replacing all devices
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
block_device_info = {
|
||||
'block_device_mapping': [
|
||||
{'connection_info': "fake",
|
||||
'mount_device': "/dev/vda",
|
||||
'delete_on_termination': True},
|
||||
{'connection_info': "fake",
|
||||
'mount_device': "/dev/vdb",
|
||||
'delete_on_termination': True},
|
||||
{'connection_info': "fake",
|
||||
'mount_device': "/dev/vdc",
|
||||
'delete_on_termination': True},
|
||||
]
|
||||
}
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'/dev/vdb': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'/dev/vdc': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
|
||||
def test_get_disk_mapping_complex(self):
|
||||
# The strangest possible disk mapping setup
|
||||
user_context = context.RequestContext(self.user_id, self.project_id)
|
||||
instance_ref = db.instance_create(user_context, self.test_instance)
|
||||
|
||||
block_device_info = {
|
||||
'root_device_name': '/dev/vdf',
|
||||
'swap': {'device_name': '/dev/vdy',
|
||||
'swap_size': 10},
|
||||
'ephemerals': [
|
||||
{'num': 0, 'virtual_name': 'ephemeral0',
|
||||
'device_name': '/dev/vdb', 'size': 10},
|
||||
{'num': 1, 'virtual_name': 'ephemeral1',
|
||||
'device_name': '/dev/vdc', 'size': 10},
|
||||
],
|
||||
'block_device_mapping': [
|
||||
{'connection_info': "fake",
|
||||
'mount_device': "/dev/vda",
|
||||
'delete_on_termination': True},
|
||||
]
|
||||
}
|
||||
mapping = blockinfo.get_disk_mapping("kvm", instance_ref,
|
||||
"virtio", "ide",
|
||||
block_device_info)
|
||||
|
||||
expect = {
|
||||
'disk': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk'},
|
||||
'/dev/vda': {'bus': 'virtio', 'dev': 'vda', 'type': 'disk'},
|
||||
'disk.eph0': {'bus': 'virtio', 'dev': 'vdb', 'type': 'disk'},
|
||||
'disk.eph1': {'bus': 'virtio', 'dev': 'vdc', 'type': 'disk'},
|
||||
'disk.swap': {'bus': 'virtio', 'dev': 'vdy', 'type': 'disk'},
|
||||
'root': {'bus': 'virtio', 'dev': 'vdf', 'type': 'disk'}
|
||||
}
|
||||
self.assertEqual(mapping, expect)
|
||||
@@ -66,8 +66,12 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
},
|
||||
'serial': 'fake_serial',
|
||||
}
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'block')
|
||||
self.assertEqual(tree.find('./serial').text, 'fake_serial')
|
||||
@@ -92,13 +96,17 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
iqn = 'iqn.2010-10.org.openstack:%s' % name
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_info = self.iscsi_connection(vol, location, iqn)
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
|
||||
self.assertEqual(tree.get('type'), 'block')
|
||||
self.assertEqual(tree.find('./source').get('dev'), dev_str)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
|
||||
'-p', location),
|
||||
('iscsiadm', '-m', 'node', '-T', iqn,
|
||||
@@ -126,13 +134,17 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
self.stubs.Set(self.fake_conn, 'get_all_block_devices', lambda: devs)
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_info = self.iscsi_connection(vol, location, iqn)
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
dev_str = '/dev/disk/by-path/ip-%s-iscsi-%s-lun-1' % (location, iqn)
|
||||
self.assertEqual(tree.get('type'), 'block')
|
||||
self.assertEqual(tree.find('./source').get('dev'), dev_str)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
expected_commands = [('iscsiadm', '-m', 'node', '-T', iqn,
|
||||
'-p', location),
|
||||
('iscsiadm', '-m', 'node', '-T', iqn,
|
||||
@@ -155,13 +167,17 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
name = 'volume-00000001'
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_info = self.sheepdog_connection(vol)
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'network')
|
||||
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
|
||||
self.assertEqual(tree.find('./source').get('name'), name)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
def rbd_connection(self, volume):
|
||||
return {
|
||||
@@ -180,15 +196,19 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
name = 'volume-00000001'
|
||||
vol = {'id': 1, 'name': name}
|
||||
connection_info = self.rbd_connection(vol)
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'network')
|
||||
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
|
||||
rbd_name = '%s/%s' % ('rbd', name)
|
||||
self.assertEqual(tree.find('./source').get('name'), rbd_name)
|
||||
self.assertEqual(tree.find('./source/auth'), None)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
def test_libvirt_rbd_driver_auth_enabled(self):
|
||||
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
|
||||
@@ -202,9 +222,13 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
connection_info['data']['auth_username'] = user
|
||||
connection_info['data']['secret_type'] = secret_type
|
||||
connection_info['data']['secret_uuid'] = uuid
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'network')
|
||||
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
|
||||
@@ -213,7 +237,7 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
self.assertEqual(tree.find('./auth').get('username'), user)
|
||||
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
|
||||
self.assertEqual(tree.find('./auth/secret').get('uuid'), uuid)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
def test_libvirt_rbd_driver_auth_enabled_flags_override(self):
|
||||
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
|
||||
@@ -232,9 +256,13 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
flags_user = 'bar'
|
||||
self.flags(rbd_user=flags_user,
|
||||
rbd_secret_uuid=flags_uuid)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'network')
|
||||
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
|
||||
@@ -243,7 +271,7 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
self.assertEqual(tree.find('./auth').get('username'), flags_user)
|
||||
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
|
||||
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
def test_libvirt_rbd_driver_auth_disabled(self):
|
||||
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
|
||||
@@ -257,16 +285,20 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
connection_info['data']['auth_username'] = user
|
||||
connection_info['data']['secret_type'] = secret_type
|
||||
connection_info['data']['secret_uuid'] = uuid
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'network')
|
||||
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
|
||||
rbd_name = '%s/%s' % ('rbd', name)
|
||||
self.assertEqual(tree.find('./source').get('name'), rbd_name)
|
||||
self.assertEqual(tree.find('./auth'), None)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
def test_libvirt_rbd_driver_auth_disabled_flags_override(self):
|
||||
libvirt_driver = volume.LibvirtNetVolumeDriver(self.fake_conn)
|
||||
@@ -287,9 +319,13 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
flags_user = 'bar'
|
||||
self.flags(rbd_user=flags_user,
|
||||
rbd_secret_uuid=flags_uuid)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'network')
|
||||
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
|
||||
@@ -298,7 +334,7 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
self.assertEqual(tree.find('./auth').get('username'), flags_user)
|
||||
self.assertEqual(tree.find('./auth/secret').get('type'), secret_type)
|
||||
self.assertEqual(tree.find('./auth/secret').get('uuid'), flags_uuid)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
def test_libvirt_nfs_driver(self):
|
||||
# NOTE(vish) exists is to make driver assume connecting worked
|
||||
@@ -313,12 +349,16 @@ class LibvirtVolumeTestCase(test.TestCase):
|
||||
file_path = os.path.join(export_mnt_base, name)
|
||||
|
||||
connection_info = {'data': {'export': export_string, 'name': name}}
|
||||
mount_device = "vde"
|
||||
conf = libvirt_driver.connect_volume(connection_info, mount_device)
|
||||
disk_info = {
|
||||
"bus": "virtio",
|
||||
"dev": "vde",
|
||||
"type": "disk",
|
||||
}
|
||||
conf = libvirt_driver.connect_volume(connection_info, disk_info)
|
||||
tree = conf.format_dom()
|
||||
self.assertEqual(tree.get('type'), 'file')
|
||||
self.assertEqual(tree.find('./source').get('file'), file_path)
|
||||
libvirt_driver.disconnect_volume(connection_info, mount_device)
|
||||
libvirt_driver.disconnect_volume(connection_info, "vde")
|
||||
|
||||
expected_commands = [
|
||||
('stat', export_mnt_base),
|
||||
|
||||
@@ -378,10 +378,10 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
|
||||
instance_ref, network_info = self._get_running_instance()
|
||||
self.connection.attach_volume({'driver_volume_type': 'fake'},
|
||||
instance_ref,
|
||||
'/mnt/nova/something')
|
||||
'/dev/sda')
|
||||
self.connection.detach_volume({'driver_volume_type': 'fake'},
|
||||
instance_ref,
|
||||
'/mnt/nova/something')
|
||||
'/dev/sda')
|
||||
|
||||
@catch_notimplementederror
|
||||
def test_attach_detach_different_power_states(self):
|
||||
@@ -389,11 +389,11 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
|
||||
self.connection.power_off(instance_ref)
|
||||
self.connection.attach_volume({'driver_volume_type': 'fake'},
|
||||
instance_ref,
|
||||
'/mnt/nova/something')
|
||||
'/dev/sda')
|
||||
self.connection.power_on(instance_ref)
|
||||
self.connection.detach_volume({'driver_volume_type': 'fake'},
|
||||
instance_ref,
|
||||
'/mnt/nova/something')
|
||||
'/dev/sda')
|
||||
|
||||
@catch_notimplementederror
|
||||
def test_get_info(self):