Merge "Don't use ec2 IDs in scheduler driver"
commit af6f0d8cb3
@@ -21,7 +21,6 @@
 Scheduler base class that all Schedulers should inherit from
 """
 
-from nova.api.ec2 import ec2utils
 from nova.compute import api as compute_api
 from nova.compute import power_state
 from nova.compute import vm_states
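
For context: the dropped helper rendered integer database IDs as EC2-style identifiers, which is exactly the formatting the scheduler no longer wants in its errors. A rough sketch of that behaviour, assuming the usual 'i-%08x' template (illustration only, not the ec2utils source):

# Illustration only: approximates ec2utils.id_to_ec2_id, which renders an
# integer DB id as an EC2-style 'i-xxxxxxxx' string.
def id_to_ec2_id(instance_id, template='i-%08x'):
    return template % int(instance_id)

print(id_to_ec2_id(31337))  # i-00007a69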
@@ -249,8 +248,8 @@ class Scheduler(object):
 
         # Checking instance is running.
         if instance_ref['power_state'] != power_state.RUNNING:
-            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
-            raise exception.InstanceNotRunning(instance_id=instance_id)
+            raise exception.InstanceNotRunning(
+                    instance_id=instance_ref['uuid'])
 
         # Checing volume node is running when any volumes are mounted
         # to the instance.
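
The check itself is unchanged; only the identifier attached to the exception changes, from an EC2-style string derived from the integer primary key to the instance UUID the rest of the API already uses. A hedged sketch with an illustrative record:

# Illustrative record; only the identifier reported in the error differs.
instance_ref = {'id': 31337, 'uuid': 'fake_uuid'}

old_identifier = 'i-%08x' % instance_ref['id']   # 'i-00007a69', EC2-style
new_identifier = instance_ref['uuid']            # 'fake_uuid', API-native UUID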
@@ -291,9 +290,8 @@ class Scheduler(object):
         # and dest is not same.
         src = instance_ref['host']
         if dest == src:
-            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
-            raise exception.UnableToMigrateToSelf(instance_id=instance_id,
-                                                  host=dest)
+            raise exception.UnableToMigrateToSelf(
+                    instance_id=instance_ref['uuid'], host=dest)
 
         # Checking dst host still has enough capacities.
         self.assert_compute_node_has_enough_resources(context,
@@ -417,8 +415,8 @@ class Scheduler(object):
         mem_inst = instance_ref['memory_mb']
         avail = avail - used
         if avail <= mem_inst:
-            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
-            reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
+            instance_uuid = instance_ref['uuid']
+            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                        "Lack of memory(host:%(avail)s <= "
                        "instance:%(mem_inst)s)")
             raise exception.MigrationError(reason=reason % locals())
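
The capacity check around the renamed variable is untouched: memory left on the destination after subtracting what is already used must exceed the instance's memory_mb, and the failure reason is interpolated with % locals(). A small worked example with made-up numbers (the _() translation wrapper is omitted here):

# Made-up capacities; mirrors the check in the hunk above.
avail = 4096                  # MB left on dest after 'avail = avail - used'
mem_inst = 8192               # instance_ref['memory_mb']
instance_uuid = 'fake_uuid'
dest = 'fake_host2'

if avail <= mem_inst:
    reason = ("Unable to migrate %(instance_uuid)s to %(dest)s: "
              "Lack of memory(host:%(avail)s <= "
              "instance:%(mem_inst)s)")
    print(reason % locals())
    # Unable to migrate fake_uuid to fake_host2:
    # Lack of memory(host:4096 <= instance:8192)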
@@ -473,8 +471,8 @@ class Scheduler(object):
 
         # Check that available disk > necessary disk
         if (available - necessary) < 0:
-            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
-            reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
+            instance_uuid = instance_ref['uuid']
+            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                        "Lack of disk(host:%(available)s "
                        "<= instance:%(necessary)s)")
             raise exception.MigrationError(reason=reason % locals())
@@ -415,7 +415,9 @@ class SchedulerTestCase(test.TestCase):
     def _live_migration_instance(self):
         volume1 = {'id': 31338}
         volume2 = {'id': 31339}
-        return {'id': 31337, 'name': 'fake-instance',
+        return {'id': 31337,
+                'uuid': 'fake_uuid',
+                'name': 'fake-instance',
                 'host': 'fake_host1',
                 'volumes': [volume1, volume2],
                 'power_state': power_state.RUNNING,
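
The fixture change follows directly from the driver change: every error path above now reads instance_ref['uuid'], so a fake instance without that key would fail with KeyError instead of raising the exception a test is trying to observe. A minimal sketch of the point:

# Sketch: why the fake instance now needs a 'uuid' key.
old_fixture = {'id': 31337, 'name': 'fake-instance'}
new_fixture = dict(old_fixture, uuid='fake_uuid')

new_fixture['uuid']     # 'fake_uuid' -- what the driver now reports
# old_fixture['uuid']   # would raise KeyError before this change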
@@ -575,15 +577,10 @@ class SchedulerTestCase(test.TestCase):
 
         self.mox.ReplayAll()
 
-        c = False
-        try:
-            self.driver.schedule_live_migration(self.context,
-                    instance_id=instance['id'], dest=dest,
-                    block_migration=block_migration)
-            self._test_scheduler_live_migration(options)
-        except exception.Invalid, e:
-            c = (str(e).find('is not running') > 0)
-        self.assertTrue(c)
+        self.assertRaises(exception.InstanceNotRunning,
+                self.driver.schedule_live_migration, self.context,
+                instance_id=instance['id'], dest=dest,
+                block_migration=block_migration)
 
     def test_live_migration_volume_node_not_alive(self):
         """Raise exception when volume node is not alive."""
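
The test now asserts the specific exception class instead of catching the broad exception.Invalid and grepping its message. Note that assertRaises receives the callable plus its arguments, not an already-evaluated call, so the exception is raised inside the assertion. A self-contained sketch of the pattern with stand-in names (plain unittest, not Nova code):

import unittest


class FakeInstanceNotRunning(Exception):
    """Stand-in for exception.InstanceNotRunning."""


def schedule_live_migration(context, instance_id, dest, block_migration):
    """Stand-in for the driver call exercised by the test."""
    raise FakeInstanceNotRunning('instance %s is not running' % instance_id)


class ExampleTestCase(unittest.TestCase):
    def test_raises_specific_class(self):
        # Pass the callable and its arguments; assertRaises makes the call.
        self.assertRaises(FakeInstanceNotRunning,
                          schedule_live_migration, 'fake-context',
                          instance_id='fake_uuid', dest='fake_host2',
                          block_migration=False)


if __name__ == '__main__':
    unittest.main()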