Remove state altering in live-migration code
* Stops live migration from checking the volume node, because it will not be accessible if volumes are a separate service.
* Stops state modification of volumes during live migration, because the volumes are not migrating.
* Partially implements blueprint volume-decoupling.

Change-Id: I07bdbc660ff220e2079e643a002c4c6bb5df6db7
This commit is contained in:
parent
90f305c399
commit
4465180626
|
@ -2065,10 +2065,6 @@ class ComputeManager(manager.SchedulerDependentManager):
|
|||
"args": {'instance_id': instance_ref['id'],
|
||||
'block_migration': block_migration}})
|
||||
|
||||
# Restore volume state
|
||||
for volume_ref in instance_ref['volumes']:
|
||||
self.volume_api.update(ctxt, volume_ref, {'status': 'in-use'})
|
||||
|
||||
# No instance booting at source host, but instance dir
|
||||
# must be deleted for preparing next block migration
|
||||
if block_migration:
|
||||
|
|
|
@ -351,10 +351,6 @@ class ServiceUnavailable(Invalid):
|
|||
message = _("Service is unavailable at this time.")
|
||||
|
||||
|
||||
class VolumeServiceUnavailable(ServiceUnavailable):
|
||||
message = _("Volume service is unavailable at this time.")
|
||||
|
||||
|
||||
class ComputeServiceUnavailable(ServiceUnavailable):
|
||||
message = _("Compute service is unavailable at this time.")
|
||||
|
||||
|
|
|
@ -227,12 +227,6 @@ class Scheduler(object):
|
|||
values = {"vm_state": vm_states.MIGRATING}
|
||||
db.instance_update(context, instance_id, values)
|
||||
|
||||
# Changing volume state
|
||||
for volume_ref in instance_ref['volumes']:
|
||||
db.volume_update(context,
|
||||
volume_ref['id'],
|
||||
{'status': 'migrating'})
|
||||
|
||||
src = instance_ref['host']
|
||||
cast_to_compute_host(context, src, 'live_migration',
|
||||
update_db=False,
|
||||
|
@ -255,13 +249,6 @@ class Scheduler(object):
|
|||
raise exception.InstanceNotRunning(
|
||||
instance_id=instance_ref['uuid'])
|
||||
|
||||
# Checking volume node is running when any volumes are mounted
|
||||
# to the instance.
|
||||
if len(instance_ref['volumes']) != 0:
|
||||
services = db.service_get_all_by_topic(context, 'volume')
|
||||
if len(services) < 1 or not utils.service_is_up(services[0]):
|
||||
raise exception.VolumeServiceUnavailable()
|
||||
|
||||
# Checking src host exists and compute node
|
||||
src = instance_ref['host']
|
||||
services = db.service_get_all_compute_by_host(context, src)
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
Tests For Scheduler
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import json
|
||||
|
||||
from nova.compute import api as compute_api
|
||||
|
@ -29,7 +28,6 @@ from nova import context
|
|||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova.notifier import api as notifier
|
||||
from nova import rpc
|
||||
from nova.rpc import common as rpc_common
|
||||
from nova.scheduler import driver
|
||||
|
@ -432,7 +430,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
|
||||
self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
|
||||
self.mox.StubOutWithMock(db, 'instance_update')
|
||||
self.mox.StubOutWithMock(db, 'volume_update')
|
||||
self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
|
||||
|
||||
dest = 'fake_host2'
|
||||
|
@ -449,11 +446,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
db.instance_update(self.context, instance['id'],
|
||||
{'vm_state': vm_states.MIGRATING})
|
||||
|
||||
db.volume_update(self.context, instance['volumes'][0]['id'],
|
||||
{'status': 'migrating'})
|
||||
db.volume_update(self.context, instance['volumes'][1]['id'],
|
||||
{'status': 'migrating'})
|
||||
|
||||
driver.cast_to_compute_host(self.context, instance['host'],
|
||||
'live_migration', update_db=False,
|
||||
instance_id=instance['id'], dest=dest,
|
||||
|
@ -469,7 +461,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
"""Test live migration when all checks pass."""
|
||||
|
||||
self.mox.StubOutWithMock(db, 'instance_get')
|
||||
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
|
||||
self.mox.StubOutWithMock(utils, 'service_is_up')
|
||||
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
|
||||
self.mox.StubOutWithMock(self.driver, '_get_compute_info')
|
||||
|
@ -478,7 +469,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
self.mox.StubOutWithMock(rpc, 'call')
|
||||
self.mox.StubOutWithMock(rpc, 'cast')
|
||||
self.mox.StubOutWithMock(db, 'instance_update')
|
||||
self.mox.StubOutWithMock(db, 'volume_update')
|
||||
self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
|
||||
|
||||
dest = 'fake_host2'
|
||||
|
@ -488,9 +478,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
db.instance_get(self.context, instance['id']).AndReturn(instance)
|
||||
|
||||
# Source checks (volume and source compute are up)
|
||||
db.service_get_all_by_topic(self.context, 'volume').AndReturn(
|
||||
['fake_service'])
|
||||
utils.service_is_up('fake_service').AndReturn(True)
|
||||
db.service_get_all_compute_by_host(self.context,
|
||||
instance['host']).AndReturn(['fake_service2'])
|
||||
utils.service_is_up('fake_service2').AndReturn(True)
|
||||
|
@ -546,10 +533,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
|
||||
db.instance_update(self.context, instance['id'],
|
||||
{'vm_state': vm_states.MIGRATING})
|
||||
db.volume_update(self.context, instance['volumes'][0]['id'],
|
||||
{'status': 'migrating'})
|
||||
db.volume_update(self.context, instance['volumes'][1]['id'],
|
||||
{'status': 'migrating'})
|
||||
|
||||
driver.cast_to_compute_host(self.context, instance['host'],
|
||||
'live_migration', update_db=False,
|
||||
|
@ -582,33 +565,10 @@ class SchedulerTestCase(test.TestCase):
|
|||
instance_id=instance['id'], dest=dest,
|
||||
block_migration=block_migration)
|
||||
|
||||
def test_live_migration_volume_node_not_alive(self):
|
||||
"""Raise exception when volume node is not alive."""
|
||||
|
||||
self.mox.StubOutWithMock(db, 'instance_get')
|
||||
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
|
||||
self.mox.StubOutWithMock(utils, 'service_is_up')
|
||||
|
||||
dest = 'fake_host2'
|
||||
block_migration = False
|
||||
instance = self._live_migration_instance()
|
||||
db.instance_get(self.context, instance['id']).AndReturn(instance)
|
||||
# Volume down
|
||||
db.service_get_all_by_topic(self.context, 'volume').AndReturn(
|
||||
['fake_service'])
|
||||
utils.service_is_up('fake_service').AndReturn(False)
|
||||
|
||||
self.mox.ReplayAll()
|
||||
self.assertRaises(exception.VolumeServiceUnavailable,
|
||||
self.driver.schedule_live_migration, self.context,
|
||||
instance_id=instance['id'], dest=dest,
|
||||
block_migration=block_migration)
|
||||
|
||||
def test_live_migration_compute_src_not_alive(self):
|
||||
"""Raise exception when src compute node is not alive."""
|
||||
|
||||
self.mox.StubOutWithMock(db, 'instance_get')
|
||||
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
|
||||
self.mox.StubOutWithMock(utils, 'service_is_up')
|
||||
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
|
||||
|
||||
|
@ -616,10 +576,6 @@ class SchedulerTestCase(test.TestCase):
|
|||
block_migration = False
|
||||
instance = self._live_migration_instance()
|
||||
db.instance_get(self.context, instance['id']).AndReturn(instance)
|
||||
# Volume up
|
||||
db.service_get_all_by_topic(self.context, 'volume').AndReturn(
|
||||
['fake_service'])
|
||||
utils.service_is_up('fake_service').AndReturn(True)
|
||||
|
||||
# Compute down
|
||||
db.service_get_all_compute_by_host(self.context,
|
||||
|
|
Loading…
Reference in New Issue