Fixed reviewer's comments: 1. add dest-instance-dir deletion operation to nova.compute.manager; 2. fix invalid raise statement

This commit is contained in:
Kei Masumoto
2011-06-21 20:51:07 +09:00
parent a6d5276461
commit c184fa5d03
6 changed files with 57 additions and 23 deletions

View File

@@ -1045,7 +1045,7 @@ class ComputeManager(manager.SchedulerDependentManager):
return self.driver.update_available_resource(context, self.host)
def pre_live_migration(self, context, instance_id, time=None,
block_migration=False, **kwargs):
block_migration=False, disk=None):
"""Preparations for live migration at dest host.
:param context: security context
@@ -1106,7 +1106,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if block_migration:
self.driver.pre_block_migration(context,
instance_ref,
kwargs.get('disk'))
disk)
def live_migration(self, context, instance_id,
dest, block_migration=False):
@@ -1130,17 +1130,18 @@ class ComputeManager(manager.SchedulerDependentManager):
{"method": "check_for_export",
"args": {'instance_id': instance_id}})
args = {}
args['instance_id'] = instance_id
if block_migration:
args['block_migration'] = block_migration
args['disk'] = \
self.driver.get_instance_disk_info(context, instance_ref)
disk = self.driver.get_instance_disk_info(context,
instance_ref)
else:
disk = None
rpc.call(context,
self.db.queue_get_for(context, FLAGS.compute_topic, dest),
{"method": "pre_live_migration",
"args": args})
"args": {'instance_id': instance_id,
'block_migration': block_migration,
'disk': disk}})
except Exception:
i_name = instance_ref.name
@@ -1253,11 +1254,20 @@ class ComputeManager(manager.SchedulerDependentManager):
# any empty images have to be deleted.
# In the current version, argument dest != None means this method is
# called for error recovery
#if dest:
# rpc.cast(ctxt,
# self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
# {"method": "self.driver.destroy",
# "args": {'instance':instance_ref})
if dest:
rpc.cast(ctxt,
self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
{"method": "cleanup",
"args": {'instance_id': instance_ref['id']}})
def cleanup(self, ctxt, instance_id):
""" Cleaning up the image directory that is created by pre_live_migration.
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
"""
instance_ref = self.db.instance_get(ctxt, instance_id)
self.driver.cleanup(instance_ref)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""

View File

@@ -588,6 +588,10 @@ class InstanceExists(Duplicate):
message = _("Instance %(name)s already exists.")
class InvalidSharedStorage(NovaException):
message = _("%(path)s is on shared storage: %(reason)s")
class MigrationError(NovaException):
message = _("Migration error") + ": %(reason)s"

View File

@@ -200,7 +200,8 @@ class Scheduler(object):
try:
self.mounted_on_same_shared_storage(context, instance_ref, dest)
if block_migration:
raise
reason = "Block migration can not be used with shared storage."
raise exception.InvalidSharedStorage(reason=reason, path=dest)
except rpc.RemoteError:
if not block_migration:
raise

View File

@@ -556,7 +556,10 @@ class ComputeTestCase(test.TestCase):
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}})
"args": {'instance_id': i_ref['id'],
'block_migration': False,
'disk': None}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
@@ -582,7 +585,9 @@ class ComputeTestCase(test.TestCase):
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}}).\
"args": {'instance_id': i_ref['id'],
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
@@ -609,7 +614,9 @@ class ComputeTestCase(test.TestCase):
AndReturn(topic)
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}}).\
"args": {'instance_id': i_ref['id'],
'block_migration': False,
'disk': None}}).\
AndRaise(rpc.RemoteError('', '', ''))
dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
'state': power_state.RUNNING,
@@ -634,7 +641,9 @@ class ComputeTestCase(test.TestCase):
dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
AndReturn(topic)
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': i_ref['id']}})
"args": {'instance_id': i_ref['id'],
'block_migration': False,
'disk': None}})
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,

View File

@@ -734,7 +734,7 @@ class LibvirtConnTestCase(test.TestCase):
# large disk space.
self.mox.StubOutWithMock(utils, "execute")
utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw',
'%s/%s/disk' % (tmpdir, instance_ref.name), 10)
'%s/%s/disk' % (tmpdir, instance_ref.name), '10G')
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
@@ -759,10 +759,10 @@ class LibvirtConnTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, self.test_instance)
dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
"<devices>"
"<disk type='file'><driver type='raw'/>"
"<disk type='file'><driver name='qemu' type='raw'/>"
"<source file='/test/disk'/>"
"<target dev='vda' bus='virtio'/></disk>"
"<disk type='file'><driver type='qcow2'/>"
"<disk type='file'><driver name='qemu' type='qcow2'/>"
"<source file='/test/disk.local'/>"
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")

View File

@@ -120,7 +120,7 @@ flags.DEFINE_string('live_migration_flag',
'Define live migration behavior.')
flags.DEFINE_string('block_migration_flag',
"VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
"VIR_MIGRATE_NON_SHARED_DISK",
"VIR_MIGRATE_NON_SHARED_INC",
'Define block migration behavior.')
flags.DEFINE_integer('live_migration_bandwidth', 0,
'Define live migration behavior')
@@ -295,7 +295,10 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(justinsb): We remove the domain definition. We probably
# would do better to keep it if cleanup=False (e.g. volumes?)
# (e.g. #2 - not losing machines on failure)
virt_dom.undefine()
# NOTE(masumotok): Migrated instances do not have domain
# definitions.
if instance.name in self._conn.listDefinedDomains():
virt_dom.undefine()
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.warning(_("Error from libvirt during undefine of "
@@ -335,6 +338,13 @@ class LibvirtConnection(driver.ComputeDriver):
if os.path.exists(target):
shutil.rmtree(target)
def cleanup(self, instance):
""" Cleaning up the image directory that is created by pre_live_migration.
:param instance: nova.db.sqlalchemy.models.Instance
"""
self._cleanup(instance)
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
virt_dom = self._lookup_by_name(instance_name)