Remove AoE, Clean up volume code

* Removes ATA over Ethernet
 * Adds drivers to libvirt for volumes
 * Adds initialize_connection and terminate_connection to volume api
 * Passes connection info back through volume api

Change-Id: I1b1626f40bebe8466ab410fb174683293c7c474f
This commit is contained in:
Vishvananda Ishaya
2011-09-23 09:22:32 -07:00
parent c25da2dd91
commit cd2cd47a85
13 changed files with 248 additions and 328 deletions

View File

@@ -12,6 +12,7 @@ Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org> Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com> Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Arvind Somya <asomya@cisco.com> Arvind Somya <asomya@cisco.com>
Ben McGraw <ben@pistoncloud.com>
Bilal Akhtar <bilalakhtar@ubuntu.com> Bilal Akhtar <bilalakhtar@ubuntu.com>
Brad Hall <brad@nicira.com> Brad Hall <brad@nicira.com>
Brad McConnell <bmcconne@rackspace.com> Brad McConnell <bmcconne@rackspace.com>

View File

@@ -962,9 +962,8 @@ class VmCommands(object):
msg = _('Only KVM and QEmu are supported for now. Sorry!') msg = _('Only KVM and QEmu are supported for now. Sorry!')
raise exception.Error(msg) raise exception.Error(msg)
if (FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \ if FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver':
FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver'): msg = _("Support only ISCSIDriver. Sorry!")
msg = _("Support only AOEDriver and ISCSIDriver. Sorry!")
raise exception.Error(msg) raise exception.Error(msg)
rpc.call(ctxt, rpc.call(ctxt,

0
bin/nova-spoolsentry Normal file → Executable file
View File

View File

@@ -73,7 +73,6 @@ External unix tools that are required:
* dnsmasq * dnsmasq
* vlan * vlan
* open-iscsi and iscsitarget (if you use iscsi volumes) * open-iscsi and iscsitarget (if you use iscsi volumes)
* aoetools and vblade-persist (if you use aoe-volumes)
Nova uses cutting-edge versions of many packages. There are ubuntu packages in Nova uses cutting-edge versions of many packages. There are ubuntu packages in
the nova-core trunk ppa. You can add this ppa to your sources list on an the nova-core trunk ppa. You can add this ppa to your sources list on an

View File

@@ -394,10 +394,6 @@ class VolumeIsBusy(Error):
message = _("deleting volume %(volume_name)s that has snapshot") message = _("deleting volume %(volume_name)s that has snapshot")
class ExportDeviceNotFoundForVolume(NotFound):
message = _("No export device found for volume %(volume_id)s.")
class ISCSITargetNotFoundForVolume(NotFound): class ISCSITargetNotFoundForVolume(NotFound):
message = _("No target id found for volume %(volume_id)s.") message = _("No target id found for volume %(volume_id)s.")
@@ -406,6 +402,10 @@ class DiskNotFound(NotFound):
message = _("No disk at %(location)s") message = _("No disk at %(location)s")
class VolumeDriverNotFound(NotFound):
message = _("Could not find a handler for %(driver_type)s volume.")
class InvalidImageRef(Invalid): class InvalidImageRef(Invalid):
message = _("Invalid image href %(image_href)s.") message = _("Invalid image href %(image_href)s.")

View File

@@ -10,7 +10,7 @@ flags.DEFINE_integer('rpc_conn_pool_size', 30,
'Size of RPC connection pool') 'Size of RPC connection pool')
class RemoteError(exception.Error): class RemoteError(exception.NovaException):
"""Signifies that a remote class has raised an exception. """Signifies that a remote class has raised an exception.
Contains a string representation of the type of the original exception, Contains a string representation of the type of the original exception,
@@ -19,11 +19,10 @@ class RemoteError(exception.Error):
contains all of the relevant info. contains all of the relevant info.
""" """
message = _("Remote error: %(exc_type)s %(value)s\n%(traceback)s.")
def __init__(self, exc_type, value, traceback): def __init__(self, exc_type=None, value=None, traceback=None):
self.exc_type = exc_type self.exc_type = exc_type
self.value = value self.value = value
self.traceback = traceback self.traceback = traceback
super(RemoteError, self).__init__('%s %s\n%s' % (exc_type, super(RemoteError, self).__init__(**self.__dict__)
value,
traceback))

View File

@@ -33,11 +33,7 @@ FLAGS['network_size'].SetDefault(8)
FLAGS['num_networks'].SetDefault(2) FLAGS['num_networks'].SetDefault(2)
FLAGS['fake_network'].SetDefault(True) FLAGS['fake_network'].SetDefault(True)
FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService') FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService')
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
FLAGS['num_shelves'].SetDefault(2)
FLAGS['blades_per_shelf'].SetDefault(4)
FLAGS['iscsi_num_targets'].SetDefault(8) FLAGS['iscsi_num_targets'].SetDefault(8)
FLAGS['verbose'].SetDefault(True) FLAGS['verbose'].SetDefault(True)
FLAGS['sqlite_db'].SetDefault("tests.sqlite") FLAGS['sqlite_db'].SetDefault("tests.sqlite")

View File

@@ -1080,7 +1080,8 @@ class SimpleDriverTestCase(test.TestCase):
rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
{"method": 'compare_cpu', {"method": 'compare_cpu',
"args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) AndRaise(rpc.RemoteError(exception.InvalidCPUInfo,
exception.InvalidCPUInfo(reason='fake')))
self.mox.ReplayAll() self.mox.ReplayAll()
try: try:
@@ -1089,7 +1090,7 @@ class SimpleDriverTestCase(test.TestCase):
dest, dest,
False) False)
except rpc.RemoteError, e: except rpc.RemoteError, e:
c = (e.message.find(_("doesn't have compatibility to")) >= 0) c = (e.exc_type == exception.InvalidCPUInfo)
self.assertTrue(c) self.assertTrue(c)
db.instance_destroy(self.context, instance_id) db.instance_destroy(self.context, instance_id)

View File

@@ -21,6 +21,7 @@ Tests For Compute
""" """
from copy import copy from copy import copy
import mox
from nova import compute from nova import compute
from nova import context from nova import context
@@ -159,21 +160,6 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project_id} 'project_id': self.project_id}
return db.security_group_create(self.context, values) return db.security_group_create(self.context, values)
def _get_dummy_instance(self):
"""Get mock-return-value instance object
Use this when any testcase executed later than test_run_terminate
"""
vol1 = models.Volume()
vol1['id'] = 1
vol2 = models.Volume()
vol2['id'] = 2
instance_ref = models.Instance()
instance_ref['id'] = 1
instance_ref['volumes'] = [vol1, vol2]
instance_ref['hostname'] = 'hostname-1'
instance_ref['host'] = 'dummy'
return instance_ref
def test_create_instance_defaults_display_name(self): def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name.""" """Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)] cases = [dict(), dict(display_name=None)]
@@ -726,235 +712,124 @@ class ComputeTestCase(test.TestCase):
def test_pre_live_migration_instance_has_no_fixed_ip(self): def test_pre_live_migration_instance_has_no_fixed_ip(self):
"""Confirm raising exception if instance doesn't have fixed_ip.""" """Confirm raising exception if instance doesn't have fixed_ip."""
instance_ref = self._get_dummy_instance() # creating instance testdata
instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context() c = context.get_admin_context()
i_id = instance_ref['id'] inst_ref = db.instance_get(c, instance_id)
topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
dbmock = self.mox.CreateMock(db) # start test
dbmock.instance_get(c, i_id).AndReturn(instance_ref) self.assertRaises(exception.FixedIpNotFoundForInstance,
self.compute.db = dbmock
self.mox.ReplayAll()
self.assertRaises(exception.NotFound,
self.compute.pre_live_migration, self.compute.pre_live_migration,
c, instance_ref['id'], time=FakeTime()) c, inst_ref['id'], time=FakeTime())
# cleanup
db.instance_destroy(c, instance_id)
def test_pre_live_migration_instance_has_volume(self): def test_pre_live_migration_works_correctly(self):
"""Confirm setup_compute_volume is called when volume is mounted.""" """Confirm setup_compute_volume is called when volume is mounted."""
def fake_nw_info(*args, **kwargs): # creating instance testdata
return [(0, {'ips':['dummy']})] instance_id = self._create_instance({'host': 'dummy'})
i_ref = self._get_dummy_instance()
c = context.get_admin_context() c = context.get_admin_context()
inst_ref = db.instance_get(c, instance_id)
topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
self._setup_other_managers() # creating mocks
dbmock = self.mox.CreateMock(db) self.mox.StubOutWithMock(self.compute.driver, 'pre_live_migration')
volmock = self.mox.CreateMock(self.volume_manager) self.compute.driver.pre_live_migration({'block_device_mapping': []})
drivermock = self.mox.CreateMock(self.compute_driver) dummy_nw_info = [[None, {'ips':'1.1.1.1'}]]
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) self.compute._get_instance_nw_info(c, mox.IsA(inst_ref)
for i in range(len(i_ref['volumes'])): ).AndReturn(dummy_nw_info)
vid = i_ref['volumes'][i]['id'] self.mox.StubOutWithMock(self.compute.driver, 'plug_vifs')
volmock.setup_compute_volume(c, vid).InAnyOrder('g1') self.compute.driver.plug_vifs(mox.IsA(inst_ref), dummy_nw_info)
drivermock.plug_vifs(i_ref, fake_nw_info()) self.mox.StubOutWithMock(self.compute.driver,
drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info()) 'ensure_filtering_rules_for_instance')
self.compute.driver.ensure_filtering_rules_for_instance(
self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info) mox.IsA(inst_ref), dummy_nw_info)
self.compute.db = dbmock
self.compute.volume_manager = volmock
self.compute.driver = drivermock
# start test
self.mox.ReplayAll() self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_ref['id']) ret = self.compute.pre_live_migration(c, inst_ref['id'])
self.assertEqual(ret, None) self.assertEqual(ret, None)
def test_pre_live_migration_instance_has_no_volume(self): # cleanup
"""Confirm log meg when instance doesn't mount any volumes.""" db.instance_destroy(c, instance_id)
def fake_nw_info(*args, **kwargs):
return [(0, {'ips':['dummy']})]
i_ref = self._get_dummy_instance()
i_ref['volumes'] = []
c = context.get_admin_context()
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
drivermock.plug_vifs(i_ref, fake_nw_info())
drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info())
self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
self.compute.db = dbmock
self.compute.driver = drivermock
self.mox.ReplayAll()
ret = self.compute.pre_live_migration(c, i_ref['id'], time=FakeTime())
self.assertEqual(ret, None)
def test_pre_live_migration_setup_compute_node_fail(self):
"""Confirm operation setup_compute_network() fails.
It retries and raise exception when timeout exceeded.
"""
def fake_nw_info(*args, **kwargs):
return [(0, {'ips':['dummy']})]
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
self._setup_other_managers()
dbmock = self.mox.CreateMock(db)
netmock = self.mox.CreateMock(self.network_manager)
volmock = self.mox.CreateMock(self.volume_manager)
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
for i in range(len(i_ref['volumes'])):
volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
for i in range(FLAGS.live_migration_retry_count):
drivermock.plug_vifs(i_ref, fake_nw_info()).\
AndRaise(exception.ProcessExecutionError())
self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info)
self.compute.db = dbmock
self.compute.network_manager = netmock
self.compute.volume_manager = volmock
self.compute.driver = drivermock
self.mox.ReplayAll()
self.assertRaises(exception.ProcessExecutionError,
self.compute.pre_live_migration,
c, i_ref['id'], time=FakeTime())
def test_live_migration_works_correctly_with_volume(self):
"""Confirm check_for_export to confirm volume health check."""
i_ref = self._get_dummy_instance()
c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': i_ref['id']}})
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id'],
'block_migration': False,
'disk': None}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
self.compute.rollback_live_migration,
False)
self.compute.db = dbmock
self.mox.ReplayAll()
ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
self.assertEqual(ret, None)
def test_live_migration_dest_raises_exception(self): def test_live_migration_dest_raises_exception(self):
"""Confirm exception when pre_live_migration fails.""" """Confirm exception when pre_live_migration fails."""
i_ref = self._get_dummy_instance() # creating instance testdata
instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context() c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) inst_ref = db.instance_get(c, instance_id)
topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
# creating volume testdata
volume_id = 1
db.volume_create(c, {'id': volume_id})
values = {'instance_id': instance_id, 'device_name': '/dev/vdc',
'delete_on_termination': False, 'volume_id': volume_id}
db.block_device_mapping_create(c, values)
dbmock = self.mox.CreateMock(db) # creating mocks
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export", rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': i_ref['id']}}) "args": {'instance_id': instance_id}})
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration", rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id'], "args": {'instance_id': instance_id,
'block_migration': False, 'block_migration': True,
'disk': None}}).\ 'disk': None}}).\
AndRaise(rpc.RemoteError('', '', '')) AndRaise(rpc.common.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE, # mocks for rollback
'task_state': None, rpc.call(c, topic, {"method": "remove_volume_connection",
'host': i_ref['host']}) "args": {'instance_id': instance_id,
for v in i_ref['volumes']: 'volume_id': volume_id}})
dbmock.volume_update(c, v['id'], {'status': 'in-use'}) rpc.cast(c, topic, {"method": "rollback_live_migration_at_destination",
# mock for volume_api.remove_from_compute "args": {'instance_id': inst_ref['id']}})
rpc.call(c, topic, {"method": "remove_volume",
"args": {'volume_id': v['id']}})
self.compute.db = dbmock # start test
self.mox.ReplayAll() self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError, self.assertRaises(rpc.RemoteError,
self.compute.live_migration, self.compute.live_migration,
c, i_ref['id'], i_ref['host']) c, instance_id, inst_ref['host'], True)
def test_live_migration_dest_raises_exception_no_volume(self): # cleanup
"""Same as above test(input pattern is different) """ for bdms in db.block_device_mapping_get_all_by_instance(c,
i_ref = self._get_dummy_instance() instance_id):
i_ref['volumes'] = [] db.block_device_mapping_destroy(c, bdms['id'])
c = context.get_admin_context() db.volume_destroy(c, volume_id)
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) db.instance_destroy(c, instance_id)
dbmock = self.mox.CreateMock(db) def test_live_migration_works_correctly(self):
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id'],
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'vm_state': vm_states.ACTIVE,
'task_state': None,
'host': i_ref['host']})
self.compute.db = dbmock
self.mox.ReplayAll()
self.assertRaises(rpc.RemoteError,
self.compute.live_migration,
c, i_ref['id'], i_ref['host'])
def test_live_migration_works_correctly_no_volume(self):
"""Confirm live_migration() works as expected correctly.""" """Confirm live_migration() works as expected correctly."""
i_ref = self._get_dummy_instance() # creating instance testdata
i_ref['volumes'] = [] instance_id = self._create_instance({'host': 'dummy'})
c = context.get_admin_context() c = context.get_admin_context()
topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) inst_ref = db.instance_get(c, instance_id)
topic = db.queue_get_for(c, FLAGS.compute_topic, inst_ref['host'])
dbmock = self.mox.CreateMock(db) # create
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'call')
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration", rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id'], "args": {'instance_id': instance_id,
'block_migration': False, 'block_migration': False,
'disk': None}}) 'disk': None}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
self.compute.rollback_live_migration,
False)
self.compute.db = dbmock # start test
self.mox.ReplayAll() self.mox.ReplayAll()
ret = self.compute.live_migration(c, i_ref['id'], i_ref['host']) ret = self.compute.live_migration(c, inst_ref['id'], inst_ref['host'])
self.assertEqual(ret, None) self.assertEqual(ret, None)
# cleanup
db.instance_destroy(c, instance_id)
def test_post_live_migration_working_correctly(self): def test_post_live_migration_working_correctly(self):
"""Confirm post_live_migration() works as expected correctly.""" """Confirm post_live_migration() works as expected correctly."""
dest = 'desthost' dest = 'desthost'
flo_addr = '1.2.1.2' flo_addr = '1.2.1.2'
# Preparing datas # creating testdata
c = context.get_admin_context() c = context.get_admin_context()
instance_id = self._create_instance() instance_id = self._create_instance({'state_description': 'migrating',
'state': power_state.PAUSED})
i_ref = db.instance_get(c, instance_id) i_ref = db.instance_get(c, instance_id)
db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING, db.instance_update(c, i_ref['id'], {'vm_state': vm_states.MIGRATING,
'power_state': power_state.PAUSED}) 'power_state': power_state.PAUSED})
@@ -964,14 +839,8 @@ class ComputeTestCase(test.TestCase):
fix_ref = db.fixed_ip_get_by_address(c, fix_addr) fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
flo_ref = db.floating_ip_create(c, {'address': flo_addr, flo_ref = db.floating_ip_create(c, {'address': flo_addr,
'fixed_ip_id': fix_ref['id']}) 'fixed_ip_id': fix_ref['id']})
# reload is necessary before setting mocks
i_ref = db.instance_get(c, instance_id)
# Preparing mocks # creating mocks
self.mox.StubOutWithMock(self.compute.volume_manager,
'remove_compute_volume')
for v in i_ref['volumes']:
self.compute.volume_manager.remove_compute_volume(c, v['id'])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref, []) self.compute.driver.unfilter_instance(i_ref, [])
self.mox.StubOutWithMock(rpc, 'call') self.mox.StubOutWithMock(rpc, 'call')
@@ -979,18 +848,18 @@ class ComputeTestCase(test.TestCase):
{"method": "post_live_migration_at_destination", {"method": "post_live_migration_at_destination",
"args": {'instance_id': i_ref['id'], 'block_migration': False}}) "args": {'instance_id': i_ref['id'], 'block_migration': False}})
# executing # start test
self.mox.ReplayAll() self.mox.ReplayAll()
ret = self.compute.post_live_migration(c, i_ref, dest) ret = self.compute.post_live_migration(c, i_ref, dest)
# make sure every data is rewritten to dest # make sure every data is rewritten to destinatioin hostname.
i_ref = db.instance_get(c, i_ref['id']) i_ref = db.instance_get(c, i_ref['id'])
c1 = (i_ref['host'] == dest) c1 = (i_ref['host'] == dest)
flo_refs = db.floating_ip_get_all_by_host(c, dest) flo_refs = db.floating_ip_get_all_by_host(c, dest)
c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr) c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
# post operaton
self.assertTrue(c1 and c2) self.assertTrue(c1 and c2)
# cleanup
db.instance_destroy(c, instance_id) db.instance_destroy(c, instance_id)
db.volume_destroy(c, v_ref['id']) db.volume_destroy(c, v_ref['id'])
db.floating_ip_destroy(c, flo_addr) db.floating_ip_destroy(c, flo_addr)

View File

@@ -30,6 +30,7 @@ from nova import context
from nova import db from nova import db
from nova import exception from nova import exception
from nova import flags from nova import flags
from nova import log as logging
from nova import test from nova import test
from nova import utils from nova import utils
from nova.api.ec2 import cloud from nova.api.ec2 import cloud
@@ -38,10 +39,13 @@ from nova.compute import vm_states
from nova.virt import driver from nova.virt import driver
from nova.virt.libvirt import connection from nova.virt.libvirt import connection
from nova.virt.libvirt import firewall from nova.virt.libvirt import firewall
from nova.virt.libvirt import volume
from nova.volume import driver as volume_driver
from nova.tests import fake_network from nova.tests import fake_network
libvirt = None libvirt = None
FLAGS = flags.FLAGS FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.test_libvirt')
_fake_network_info = fake_network.fake_get_instance_nw_info _fake_network_info = fake_network.fake_get_instance_nw_info
_ipv4_like = fake_network.ipv4_like _ipv4_like = fake_network.ipv4_like
@@ -87,6 +91,71 @@ class FakeVirtDomain(object):
return self._fake_dom_xml return self._fake_dom_xml
class LibvirtVolumeTestCase(test.TestCase):
@staticmethod
def fake_execute(*cmd, **kwargs):
LOG.debug("FAKE EXECUTE: %s" % ' '.join(cmd))
return None, None
def setUp(self):
super(LibvirtVolumeTestCase, self).setUp()
self.stubs.Set(utils, 'execute', self.fake_execute)
def test_libvirt_iscsi_driver(self):
# NOTE(vish) exists is to make driver assume connecting worked
self.stubs.Set(os.path, 'exists', lambda x: True)
vol_driver = volume_driver.ISCSIDriver()
libvirt_driver = volume.LibvirtISCSIVolumeDriver('fake')
name = 'volume-00000001'
vol = {'id': 1,
'name': name,
'provider_auth': None,
'provider_location': '10.0.2.15:3260,fake '
'iqn.2010-10.org.openstack:volume-00000001'}
address = '127.0.0.1'
connection_info = vol_driver.initialize_connection(vol, address)
mount_device = "vde"
xml = libvirt_driver.connect_volume(connection_info, mount_device)
tree = xml_to_tree(xml)
dev_str = '/dev/disk/by-path/ip-10.0.2.15:3260-iscsi-iqn.' \
'2010-10.org.openstack:%s-lun-0' % name
self.assertEqual(tree.get('type'), 'block')
self.assertEqual(tree.find('./source').get('dev'), dev_str)
libvirt_driver.disconnect_volume(connection_info, mount_device)
def test_libvirt_sheepdog_driver(self):
vol_driver = volume_driver.SheepdogDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver('fake')
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
address = '127.0.0.1'
connection_info = vol_driver.initialize_connection(vol, address)
mount_device = "vde"
xml = libvirt_driver.connect_volume(connection_info, mount_device)
tree = xml_to_tree(xml)
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'sheepdog')
self.assertEqual(tree.find('./source').get('name'), name)
libvirt_driver.disconnect_volume(connection_info, mount_device)
def test_libvirt_rbd_driver(self):
vol_driver = volume_driver.RBDDriver()
libvirt_driver = volume.LibvirtNetVolumeDriver('fake')
name = 'volume-00000001'
vol = {'id': 1, 'name': name}
address = '127.0.0.1'
connection_info = vol_driver.initialize_connection(vol, address)
mount_device = "vde"
xml = libvirt_driver.connect_volume(connection_info, mount_device)
tree = xml_to_tree(xml)
self.assertEqual(tree.get('type'), 'network')
self.assertEqual(tree.find('./source').get('protocol'), 'rbd')
rbd_name = '%s/%s' % (FLAGS.rbd_pool, name)
self.assertEqual(tree.find('./source').get('name'), rbd_name)
libvirt_driver.disconnect_volume(connection_info, mount_device)
class CacheConcurrencyTestCase(test.TestCase): class CacheConcurrencyTestCase(test.TestCase):
def setUp(self): def setUp(self):
super(CacheConcurrencyTestCase, self).setUp() super(CacheConcurrencyTestCase, self).setUp()
@@ -145,6 +214,20 @@ class CacheConcurrencyTestCase(test.TestCase):
eventlet.sleep(0) eventlet.sleep(0)
class FakeVolumeDriver(object):
def __init__(self, *args, **kwargs):
pass
def attach_volume(self, *args):
pass
def detach_volume(self, *args):
pass
def get_xml(self, *args):
return ""
class LibvirtConnTestCase(test.TestCase): class LibvirtConnTestCase(test.TestCase):
def setUp(self): def setUp(self):
@@ -192,14 +275,14 @@ class LibvirtConnTestCase(test.TestCase):
return FakeVirtDomain() return FakeVirtDomain()
# Creating mocks # Creating mocks
volume_driver = 'iscsi=nova.tests.test_libvirt.FakeVolumeDriver'
self.flags(libvirt_volume_drivers=[volume_driver])
fake = FakeLibvirtConnection() fake = FakeLibvirtConnection()
# Customizing above fake if necessary # Customizing above fake if necessary
for key, val in kwargs.items(): for key, val in kwargs.items():
fake.__setattr__(key, val) fake.__setattr__(key, val)
self.flags(image_service='nova.image.fake.FakeImageService') self.flags(image_service='nova.image.fake.FakeImageService')
fw_driver = "nova.tests.fake_network.FakeIptablesFirewallDriver"
self.flags(firewall_driver=fw_driver)
self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver") self.flags(libvirt_vif_driver="nova.tests.fake_network.FakeVIFDriver")
self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn')
@@ -382,14 +465,16 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(snapshot['status'], 'active') self.assertEquals(snapshot['status'], 'active')
self.assertEquals(snapshot['name'], snapshot_name) self.assertEquals(snapshot['name'], snapshot_name)
def test_attach_invalid_device(self): def test_attach_invalid_volume_type(self):
self.create_fake_libvirt_mock() self.create_fake_libvirt_mock()
connection.LibvirtConnection._conn.lookupByName = self.fake_lookup connection.LibvirtConnection._conn.lookupByName = self.fake_lookup
self.mox.ReplayAll() self.mox.ReplayAll()
conn = connection.LibvirtConnection(False) conn = connection.LibvirtConnection(False)
self.assertRaises(exception.InvalidDevicePath, self.assertRaises(exception.VolumeDriverNotFound,
conn.attach_volume, conn.attach_volume,
"fake", "bad/device/path", "/dev/fake") {"driver_volume_type": "badtype"},
"fake",
"/dev/fake")
def test_multi_nic(self): def test_multi_nic(self):
instance_data = dict(self.test_instance) instance_data = dict(self.test_instance)
@@ -640,9 +725,15 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll() self.mox.ReplayAll()
try: try:
conn = connection.LibvirtConnection(False) conn = connection.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none) self.stubs.Set(conn.firewall_driver,
conn.firewall_driver.setattr('prepare_instance_filter', fake_none) 'setup_basic_filtering',
conn.firewall_driver.setattr('instance_filter_exists', fake_none) fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
self.stubs.Set(conn.firewall_driver,
'instance_filter_exists',
fake_none)
conn.ensure_filtering_rules_for_instance(instance_ref, conn.ensure_filtering_rules_for_instance(instance_ref,
network_info, network_info,
time=fake_timer) time=fake_timer)
@@ -708,6 +799,27 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id']) db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id']) db.instance_destroy(self.context, instance_ref['id'])
def test_pre_live_migration_works_correctly(self):
"""Confirms pre_block_migration works correctly."""
# Creating testdata
vol = {'block_device_mapping': [
{'connection_info': 'dummy', 'mount_device': '/dev/sda'},
{'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
conn = connection.LibvirtConnection(False)
# Creating mocks
self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
driver.block_device_info_get_mapping(vol
).AndReturn(vol['block_device_mapping'])
self.mox.StubOutWithMock(conn, "volume_driver_method")
for v in vol['block_device_mapping']:
conn.volume_driver_method('connect_volume',
v['connection_info'], v['mount_device'])
# Starting test
self.mox.ReplayAll()
self.assertEqual(conn.pre_live_migration(vol), None)
def test_pre_block_migration_works_correctly(self): def test_pre_block_migration_works_correctly(self):
"""Confirms pre_block_migration works correctly.""" """Confirms pre_block_migration works correctly."""
@@ -822,8 +934,12 @@ class LibvirtConnTestCase(test.TestCase):
# Start test # Start test
self.mox.ReplayAll() self.mox.ReplayAll()
conn = connection.LibvirtConnection(False) conn = connection.LibvirtConnection(False)
conn.firewall_driver.setattr('setup_basic_filtering', fake_none) self.stubs.Set(conn.firewall_driver,
conn.firewall_driver.setattr('prepare_instance_filter', fake_none) 'setup_basic_filtering',
fake_none)
self.stubs.Set(conn.firewall_driver,
'prepare_instance_filter',
fake_none)
try: try:
conn.spawn(self.context, instance, network_info) conn.spawn(self.context, instance, network_info)

View File

@@ -254,9 +254,11 @@ class _VirtDriverTestCase(test.TestCase):
network_info = test_utils.get_test_network_info() network_info = test_utils.get_test_network_info()
instance_ref = test_utils.get_test_instance() instance_ref = test_utils.get_test_instance()
self.connection.spawn(self.ctxt, instance_ref, network_info) self.connection.spawn(self.ctxt, instance_ref, network_info)
self.connection.attach_volume(instance_ref['name'], self.connection.attach_volume({'driver_volume_type': 'fake'},
'/dev/null', '/mnt/nova/something') instance_ref['name'],
self.connection.detach_volume(instance_ref['name'], '/mnt/nova/something')
self.connection.detach_volume({'driver_volume_type': 'fake'},
instance_ref['name'],
'/mnt/nova/something') '/mnt/nova/something')
@catch_notimplementederror @catch_notimplementederror

View File

@@ -257,7 +257,7 @@ class VolumeTestCase(test.TestCase):
class DriverTestCase(test.TestCase): class DriverTestCase(test.TestCase):
"""Base Test class for Drivers.""" """Base Test class for Drivers."""
driver_name = "nova.volume.driver.FakeAOEDriver" driver_name = "nova.volume.driver.FakeBaseDriver"
def setUp(self): def setUp(self):
super(DriverTestCase, self).setUp() super(DriverTestCase, self).setUp()
@@ -295,83 +295,6 @@ class DriverTestCase(test.TestCase):
self.volume.delete_volume(self.context, volume_id) self.volume.delete_volume(self.context, volume_id)
class AOETestCase(DriverTestCase):
"""Test Case for AOEDriver"""
driver_name = "nova.volume.driver.AOEDriver"
def setUp(self):
super(AOETestCase, self).setUp()
def tearDown(self):
super(AOETestCase, self).tearDown()
def _attach_volume(self):
"""Attach volumes to an instance. This function also sets
a fake log message."""
volume_id_list = []
for index in xrange(3):
vol = {}
vol['size'] = 0
volume_id = db.volume_create(self.context,
vol)['id']
self.volume.create_volume(self.context, volume_id)
# each volume has a different mountpoint
mountpoint = "/dev/sd" + chr((ord('b') + index))
db.volume_attached(self.context, volume_id, self.instance_id,
mountpoint)
(shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
volume_id)
self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
% (shelf_id, blade_id)
volume_id_list.append(volume_id)
return volume_id_list
def test_check_for_export_with_no_volume(self):
"""No log message when no volume is attached to an instance."""
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_vblade_processes(self):
"""No log message when all the vblade processes are running."""
volume_id_list = self._attach_volume()
self.stream.truncate(0)
self.volume.check_for_export(self.context, self.instance_id)
self.assertEqual(self.stream.getvalue(), '')
self._detach_volume(volume_id_list)
def test_check_for_export_with_vblade_process_missing(self):
"""Output a warning message when some vblade processes aren't
running."""
volume_id_list = self._attach_volume()
# the first vblade process isn't running
self.output = self.output.replace("run", "down", 1)
(shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
volume_id_list[0])
msg_is_match = False
self.stream.truncate(0)
try:
self.volume.check_for_export(self.context, self.instance_id)
except exception.ProcessExecutionError, e:
volume_id = volume_id_list[0]
msg = _("Cannot confirm exported volume id:%(volume_id)s. "
"vblade process for e%(shelf_id)s.%(blade_id)s "
"isn't running.") % locals()
msg_is_match = (0 <= e.message.find(msg))
self.assertTrue(msg_is_match)
self._detach_volume(volume_id_list)
class ISCSITestCase(DriverTestCase): class ISCSITestCase(DriverTestCase):
"""Test Case for ISCSIDriver""" """Test Case for ISCSIDriver"""
driver_name = "nova.volume.driver.ISCSIDriver" driver_name = "nova.volume.driver.ISCSIDriver"
@@ -408,7 +331,7 @@ class ISCSITestCase(DriverTestCase):
self.assertEqual(self.stream.getvalue(), '') self.assertEqual(self.stream.getvalue(), '')
def test_check_for_export_with_all_volume_exported(self): def test_check_for_export_with_all_volume_exported(self):
"""No log message when all the vblade processes are running.""" """No log message when all the processes are running."""
volume_id_list = self._attach_volume() volume_id_list = self._attach_volume()
self.mox.StubOutWithMock(self.volume.driver, '_execute') self.mox.StubOutWithMock(self.volume.driver, '_execute')
@@ -431,7 +354,6 @@ class ISCSITestCase(DriverTestCase):
by ietd.""" by ietd."""
volume_id_list = self._attach_volume() volume_id_list = self._attach_volume()
# the first vblade process isn't running
tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0]) tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
self.mox.StubOutWithMock(self.volume.driver, '_execute') self.mox.StubOutWithMock(self.volume.driver, '_execute')
self.volume.driver._execute("ietadm", "--op", "show", self.volume.driver._execute("ietadm", "--op", "show",

View File

@@ -99,6 +99,20 @@ class XenAPIVolumeTestCase(test.TestCase):
vol['attach_status'] = "detached" vol['attach_status'] = "detached"
return db.volume_create(self.context, vol) return db.volume_create(self.context, vol)
@staticmethod
def _make_info():
return {
'driver_volume_type': 'iscsi',
'data': {
'volume_id': 1,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260,fake',
'auth_method': 'CHAP',
'auth_method': 'fake',
'auth_method': 'fake',
}
}
def test_create_iscsi_storage(self): def test_create_iscsi_storage(self):
"""This shows how to test helper classes' methods.""" """This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests) stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
@@ -106,7 +120,7 @@ class XenAPIVolumeTestCase(test.TestCase):
helper = volume_utils.VolumeHelper helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi() helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume() vol = self._create_volume()
info = helper.parse_volume_info(vol['id'], '/dev/sdc') info = helper.parse_volume_info(self._make_info(), '/dev/sdc')
label = 'SR-%s' % vol['id'] label = 'SR-%s' % vol['id']
description = 'Test-SR' description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description) sr_ref = helper.create_iscsi_storage(session, info, label, description)
@@ -124,8 +138,9 @@ class XenAPIVolumeTestCase(test.TestCase):
# oops, wrong mount point! # oops, wrong mount point!
self.assertRaises(volume_utils.StorageError, self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info, helper.parse_volume_info,
vol['id'], self._make_info(),
'/dev/sd') 'dev/sd'
)
db.volume_destroy(context.get_admin_context(), vol['id']) db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self): def test_attach_volume(self):
@@ -135,7 +150,8 @@ class XenAPIVolumeTestCase(test.TestCase):
volume = self._create_volume() volume = self._create_volume()
instance = db.instance_create(self.context, self.instance_values) instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running') vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc') result = conn.attach_volume(self._make_info(),
instance.name, '/dev/sdc')
def check(): def check():
# check that the VM has a VBD attached to it # check that the VM has a VBD attached to it