Merge "Refactoring required for blueprint xenapi-live-migration"
commit 9a283c6b37
@@ -212,36 +212,30 @@ class Scheduler(object):
        The host where the instance is currently running.
        The scheduler then sends the request to that host.
        """
        # Whether instance exists and is running.
        # Check we can do live migration
        instance_ref = db.instance_get(context, instance_id)

        # Checking instance.
        self._live_migration_src_check(context, instance_ref)
        self._live_migration_dest_check(context, instance_ref, dest)
        self._live_migration_common_check(context, instance_ref, dest)
        self.compute_rpcapi.check_can_live_migrate_destination(context,
                instance_ref, dest, block_migration, disk_over_commit)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref,
                                        dest, block_migration,
                                        disk_over_commit)
        # Common checking.
        self._live_migration_common_check(context, instance_ref,
                                          dest, block_migration,
                                          disk_over_commit)

        # Changing instance_state.
        # Change instance_state
        values = {"task_state": task_states.MIGRATING}

        # update instance state and notify
        (old_ref, new_instance_ref) = db.instance_update_and_get_original(
                context, instance_id, values)
                context, instance_ref['uuid'], values)
        notifications.send_update(context, old_ref, new_instance_ref,
                service="scheduler")

        # Perform migration
        src = instance_ref['host']
        cast_to_compute_host(context, src, 'live_migration',
                update_db=False,
                instance_id=instance_id,
                dest=dest,
                block_migration=block_migration)

    def _live_migration_src_check(self, context, instance_ref):
        """Live migration check routine (for src host).
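The core of this refactor is in the hunk above: the scheduler stops second-guessing the destination host itself and instead asks the destination compute service over RPC. Judging from the rpc.call expectation recorded in test_live_migration_all_checks_pass further down (method name, args, version "1.2"), the client side plausibly looks like the sketch below; the real method lives in nova/compute/rpcapi.py and may differ in detail.

    from nova.openstack.common import rpc

    class ComputeAPI(object):
        """Client-side sketch; only the call shape is taken from the tests."""

        def check_can_live_migrate_destination(self, ctxt, instance, host,
                                               block_migration,
                                               disk_over_commit):
            # Synchronous call to the *destination* compute service; it is
            # expected to raise if that host cannot accept the instance.
            return rpc.call(ctxt,
                            'compute.%s' % host,
                            {'method': 'check_can_live_migrate_destination',
                             'args': {'instance_id': instance['id'],
                                      'block_migration': block_migration,
                                      'disk_over_commit': disk_over_commit},
                             'version': '1.2'})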
@@ -250,7 +244,7 @@ class Scheduler(object):
        :param instance_ref: nova.db.sqlalchemy.models.Instance object

        """

        # TODO(johngar) why is this not in the API layer?
        # Checking instance is running.
        if instance_ref['power_state'] != power_state.RUNNING:
            raise exception.InstanceNotRunning(
@@ -258,22 +252,21 @@ class Scheduler(object):

        # Checking src host exists and compute node
        src = instance_ref['host']
        services = db.service_get_all_compute_by_host(context, src)
        try:
            services = db.service_get_all_compute_by_host(context, src)
        except exception.NotFound:
            raise exception.ComputeServiceUnavailable(host=src)

        # Checking src host is alive.
        if not utils.service_is_up(services[0]):
            raise exception.ComputeServiceUnavailable(host=src)

    def _live_migration_dest_check(self, context, instance_ref, dest,
                                   block_migration, disk_over_commit):
    def _live_migration_dest_check(self, context, instance_ref, dest):
        """Live migration check routine (for destination host).

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, do block migration.
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.
        """

        # Checking dest exists and compute node.
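The change above turns a stray NotFound from the DB layer into the same scheduler-level error a dead service produces, so callers see a single failure mode. A minimal restatement of the pattern (names follow the diff; this is an illustration, assuming the db, utils and exception imports already present in nova/scheduler/driver.py, not the real module):

    def live_migration_src_check(context, instance_ref):
        src = instance_ref['host']
        try:
            services = db.service_get_all_compute_by_host(context, src)
        except exception.NotFound:
            # No compute service registered on the source host at all.
            raise exception.ComputeServiceUnavailable(host=src)
        # Registered, but is its heartbeat fresh?
        if not utils.service_is_up(services[0]):
            raise exception.ComputeServiceUnavailable(host=src)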
@@ -291,15 +284,11 @@ class Scheduler(object):
            raise exception.UnableToMigrateToSelf(
                    instance_id=instance_ref['uuid'], host=dest)

        # Checking dst host still has enough capacity.
        self.assert_compute_node_has_enough_resources(context,
                                                      instance_ref,
                                                      dest,
                                                      block_migration,
                                                      disk_over_commit)
        # Check memory requirements
        self._assert_compute_node_has_enough_memory(context,
                                                    instance_ref, dest)

    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration, disk_over_commit):
    def _live_migration_common_check(self, context, instance_ref, dest):
        """Live migration common check routine.

        Below checks are followed by
@@ -308,38 +297,10 @@ class Scheduler(object):
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, do block migration.
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.

        """

        # Checking shared storage connectivity
        # if block migration, instances_paths should not be on shared storage.
        shared = self.mounted_on_same_shared_storage(context, instance_ref,
                                                     dest)
        if block_migration:
            if shared:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)

        elif not shared:
            reason = _("Live migration can not be used "
                       "without shared storage.")
            raise exception.InvalidSharedStorage(reason=reason, path=dest)

        # Checking destination host exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
        dservice_ref = dservice_refs[0]['compute_node'][0]

        # Checking original host (where instance was launched at) exists.
        try:
            oservice_refs = db.service_get_all_compute_by_host(context,
                                           instance_ref['host'])
        except exception.NotFound:
            raise exception.SourceHostUnavailable()
        oservice_ref = oservice_refs[0]['compute_node'][0]
        dservice_ref = self._get_compute_info(context, dest)
        src = instance_ref['host']
        oservice_ref = self._get_compute_info(context, src)

        # Checking hypervisor is same.
        orig_hypervisor = oservice_ref['hypervisor_type']
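After this hunk, _get_compute_info hands back the whole compute_node record and callers pick fields off it, where the old helper fetched one keyed value per call. The fields this diff actually reads, shown with the fake record used in the tests (a sketch; real records carry many more columns):

    def fields_used_by_this_diff(node):
        return {
            'hypervisor_type': node['hypervisor_type'],        # e.g. 'xen'
            'hypervisor_version': node['hypervisor_version'],  # compared as ints
            'cpu_info': node['cpu_info'],      # opaque blob for compare_cpu
            'memory_mb': node['memory_mb'],    # consumed by the memory check
        }

    print(fields_used_by_this_diff({'hypervisor_type': 'xen',
                                    'hypervisor_version': 1,
                                    'cpu_info': 'fake_cpu_info',
                                    'memory_mb': 2048}))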
@@ -353,40 +314,7 @@ class Scheduler(object):
        if orig_hypervisor > dest_hypervisor:
            raise exception.DestinationHypervisorTooOld()

        # Checking cpuinfo.
        try:
            self.compute_rpcapi.compare_cpu(context, oservice_ref['cpu_info'],
                                            dest)

        except exception.InvalidCPUInfo:
            src = instance_ref['host']
            LOG.exception(_("host %(dest)s is not compatible with "
                            "original host %(src)s.") % locals())
            raise

    def assert_compute_node_has_enough_resources(self, context, instance_ref,
                                                 dest, block_migration,
                                                 disk_over_commit):

        """Checks if destination host has enough resource for live migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, do block migration.
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.

        """
        self.assert_compute_node_has_enough_memory(context,
                                                   instance_ref, dest)
        if not block_migration:
            return
        self.assert_compute_node_has_enough_disk(context,
                                                 instance_ref, dest,
                                                 disk_over_commit)

    def assert_compute_node_has_enough_memory(self, context,
    def _assert_compute_node_has_enough_memory(self, context,
                                               instance_ref, dest):
        """Checks if destination host has enough memory for live migration.

@@ -397,7 +325,7 @@ class Scheduler(object):

        """
        # Getting total available memory of host
        avail = self._get_compute_info(context, dest, 'memory_mb')
        avail = self._get_compute_info(context, dest)['memory_mb']

        # Getting total used memory and disk of host
        # It should be sum of memories that are assigned as max value,
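The arithmetic behind the memory check (its middle is elided between hunks, so the exact comparison is an assumption based on the surrounding comments and the test fixtures): free memory is the node's total minus everything already assigned to instances on the destination, and must exceed the migrating instance's allocation.

    def has_enough_memory(avail_mb, instances_on_dest, mem_inst_mb):
        """Sketch of _assert_compute_node_has_enough_memory's comparison."""
        used_mb = sum(i['memory_mb'] for i in instances_on_dest)
        return (avail_mb - used_mb) > mem_inst_mb

    # Fixtures from the scheduler tests below: 2048 MB total; 256 + 512 MB
    # already assigned passes for a 1024 MB instance (the 1024 MB is assumed
    # here), while 1024 + 512 MB assigned fails.
    assert has_enough_memory(2048, [{'memory_mb': 256},
                                    {'memory_mb': 512}], 1024)
    assert not has_enough_memory(2048, [{'memory_mb': 1024},
                                        {'memory_mb': 512}], 1024)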
@@ -414,54 +342,7 @@ class Scheduler(object):
                       "instance:%(mem_inst)s)")
            raise exception.MigrationError(reason=reason % locals())

    def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
                                            disk_over_commit):
        """Checks if destination host has enough disk for block migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param disk_over_commit: if True, consider real (not virtual)
                                 disk size.

        """

        # Libvirt supports qcow2 disk format, which is usually compressed
        # on compute nodes.
        # Real disk image (compressed) may be enlarged to "virtual disk size",
        # that is specified as the maximum disk size.
        # (See qemu-img -f path-to-disk)
        # Scheduler recognizes destination host still has enough disk space
        # if real disk size < available disk size
        # if disk_over_commit is True,
        # otherwise virtual disk size < available disk size.

        # Getting total available disk of host
        available_gb = self._get_compute_info(context,
                                              dest, 'disk_available_least')
        available = available_gb * (1024 ** 3)

        # Getting necessary disk size
        ret = self.compute_rpcapi.get_instance_disk_info(context, instance_ref)
        disk_infos = jsonutils.loads(ret)

        necessary = 0
        if disk_over_commit:
            for info in disk_infos:
                necessary += int(info['disk_size'])
        else:
            for info in disk_infos:
                necessary += int(info['virt_disk_size'])

        # Check that available disk > necessary disk
        if (available - necessary) < 0:
            instance_uuid = instance_ref['uuid']
            reason = _("Unable to migrate %(instance_uuid)s to %(dest)s: "
                       "Lack of disk(host:%(available)s "
                       "<= instance:%(necessary)s)")
            raise exception.MigrationError(reason=reason % locals())

    def _get_compute_info(self, context, host, key):
    def _get_compute_info(self, context, host):
        """get compute node's information specified by key

        :param context: security context
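The removed disk check (it reappears on the virt-driver side, as the libvirt tests below show) reduces to a sum-and-compare over the JSON that get_instance_disk_info returns. A standalone restatement, using stdlib json in place of jsonutils:

    import json

    def has_enough_disk(available_gb, disk_infos_json, disk_over_commit):
        """disk_over_commit charges the real (compressed) qcow2 size;
        otherwise the virtual (maximum) size is charged."""
        available = available_gb * (1024 ** 3)
        key = 'disk_size' if disk_over_commit else 'virt_disk_size'
        necessary = sum(int(info[key])
                        for info in json.loads(disk_infos_json))
        return available >= necessary

    # One image of 1024 GiB real size (the 2048 GiB virtual size is made up
    # for the example): passes against 1025 GiB free and fails against
    # 1023 GiB, mirroring the 1025/1023 fixtures in the scheduler tests.
    infos = json.dumps([{'disk_size': 1024 * (1024 ** 3),
                         'virt_disk_size': 2048 * (1024 ** 3)}])
    assert has_enough_disk(1025, infos, disk_over_commit=True)
    assert not has_enough_disk(1023, infos, disk_over_commit=True)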
@@ -471,33 +352,4 @@ class Scheduler(object):

        """
        compute_node_ref = db.service_get_all_compute_by_host(context, host)
        compute_node_ref = compute_node_ref[0]['compute_node'][0]
        return compute_node_ref[key]

    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest hosts mount the same shared storage.

        At first, the dest host creates a temp file, and the src host can
        see it if they mount the same shared storage. Then the src host
        erases it.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        src = instance_ref['host']

        filename = self.compute_rpcapi.create_shared_storage_test_file(context,
                                                                       dest)

        try:
            # make sure existence at src host.
            ret = self.compute_rpcapi.check_shared_storage_test_file(context,
                    filename, src)

        finally:
            self.compute_rpcapi.cleanup_shared_storage_test_file(context,
                    filename, dest)

        return ret
        return compute_node_ref[0]['compute_node'][0]
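mounted_on_same_shared_storage, deleted above, encodes a simple three-step protocol; its logic survives in the virt driver as the _create/_check/_cleanup_shared_storage_test_file trio exercised by the libvirt tests below. The shape of it:

    def mounted_on_same_shared_storage(rpcapi, context, src, dest):
        """True when src can see a file that dest just created, i.e. both
        hosts mount the same instances path. `rpcapi` stands in for the
        compute_rpcapi client; a sketch, not the removed code verbatim."""
        filename = rpcapi.create_shared_storage_test_file(context, dest)
        try:
            # Visible from the source host?
            return rpcapi.check_shared_storage_test_file(context,
                                                         filename, src)
        finally:
            # Always remove the probe file from the destination.
            rpcapi.cleanup_shared_storage_test_file(context, filename, dest)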
@@ -28,6 +28,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import notifications
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
from nova.openstack.common.rpc import common as rpc_common
@@ -440,23 +441,32 @@ class SchedulerTestCase(test.TestCase):
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
        self.mox.StubOutWithMock(self.driver.compute_rpcapi,
                                 'check_can_live_migrate_destination')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(driver, 'cast_to_compute_host')
        self.mox.StubOutWithMock(notifications, 'send_update')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        instance_uuid = instance['uuid']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        self.driver._live_migration_dest_check(self.context, instance, dest)
        self.driver._live_migration_common_check(self.context, instance,
                dest, block_migration, disk_over_commit)
        db.instance_update_and_get_original(self.context, instance['id'],
                dest)
        self.driver.compute_rpcapi.check_can_live_migrate_destination(
            self.context, instance, dest, block_migration, disk_over_commit)
        db.instance_update_and_get_original(self.context, instance_uuid,
                {"task_state": task_states.MIGRATING}).AndReturn(
                        (instance, instance))
        notifications.send_update(self.context, instance, instance,
                                  service="scheduler")

        driver.cast_to_compute_host(self.context, instance['host'],
                                    'live_migration', update_db=False,
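These tests all use mox's record/replay style, which is why the diff is mostly reshuffled expectations: every call the code under test will make is recorded first, then ReplayAll() arms the mocks. A minimal self-contained example of the pattern:

    import mox

    class Db(object):
        def instance_get(self, ctxt, instance_id):
            raise NotImplementedError()

    m = mox.Mox()
    db = Db()
    m.StubOutWithMock(db, 'instance_get')    # replace with a recorder
    db.instance_get('ctxt', 31337).AndReturn({'id': 31337})  # record
    m.ReplayAll()                            # switch to replay mode

    assert db.instance_get('ctxt', 31337) == {'id': 31337}

    m.VerifyAll()    # fails if a recorded expectation was never met
    m.UnsetStubs()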
@@ -469,38 +479,13 @@ class SchedulerTestCase(test.TestCase):
                                    block_migration=block_migration,
                                    disk_over_commit=disk_over_commit)

    def _check_shared_storage(self, dest, instance, check_result):
        tmp_filename = 'test-filename'
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                 {'method': 'create_shared_storage_test_file',
                  'args': {},
                  'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
                 None).AndReturn(tmp_filename)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          instance['host']).AndReturn('src_queue')
        rpc.call(self.context, 'src_queue',
                 {'method': 'check_shared_storage_test_file',
                  'args': {'filename': tmp_filename},
                  'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
                 None).AndReturn(check_result)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          dest).AndReturn('dest_queue')
        rpc.cast(self.context, 'dest_queue',
                 {'method': 'cleanup_shared_storage_test_file',
                  'args': {'filename': tmp_filename},
                  'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})

    def test_live_migration_all_checks_pass(self):
        """Test live migration when all checks pass."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
@@ -510,8 +495,12 @@ class SchedulerTestCase(test.TestCase):
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        instance_uuid = instance['uuid']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        # Source checks
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(['fake_service2'])
        utils.service_is_up('fake_service2').AndReturn(True)
@@ -521,60 +510,42 @@ class SchedulerTestCase(test.TestCase):
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)
        # assert_compute_node_has_enough_memory()
        self.driver._get_compute_info(self.context, dest,
                                      'memory_mb').AndReturn(2048)
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'memory_mb': 2048,
                                    'hypervisor_version': 1}]}])
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=256), dict(memory_mb=512)])
        # assert_compute_node_has_enough_disk()
        self.driver._get_compute_info(self.context, dest,
                                      'disk_available_least').AndReturn(1025)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          instance['host']).AndReturn('src_queue1')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue1',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        # Common checks (shared storage ok, same hypervisor, etc)
        self._check_shared_storage(dest, instance, False)

        # Common checks (same hypervisor, etc)
        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 1,
                                            'cpu_info': 'fake_cpu_info'}]}])
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                 {'method': 'compare_cpu',
                  'args': {'cpu_info': 'fake_cpu_info'},
                  'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
                 None).AndReturn(True)

        db.instance_update_and_get_original(self.context, instance['id'],
        rpc.call(self.context, "compute.fake_host2",
                 {"method": 'check_can_live_migrate_destination',
                  "args": {'instance_id': instance_id,
                           'block_migration': block_migration,
                           'disk_over_commit': disk_over_commit},
                  "version": "1.2"},
                 None)

        db.instance_update_and_get_original(self.context, instance_uuid,
                {"task_state": task_states.MIGRATING}).AndReturn(
                        (instance, instance))

        driver.cast_to_compute_host(self.context, instance['host'],
                                    'live_migration', update_db=False,
                                    instance_id=instance['id'], dest=dest,
                                    instance_id=instance_id, dest=dest,
                                    block_migration=block_migration)

        self.mox.ReplayAll()
        result = self.driver.schedule_live_migration(self.context,
                instance_id=instance['id'], dest=dest,
                instance_id=instance_id, dest=dest,
                block_migration=block_migration,
                disk_over_commit=disk_over_commit)
        self.assertEqual(result, None)
@@ -587,17 +558,44 @@ class SchedulerTestCase(test.TestCase):
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        instance_id = instance['id']
        instance['power_state'] = power_state.NOSTATE

        db.instance_get(self.context, instance['id']).AndReturn(instance)
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.mox.ReplayAll()

        self.assertRaises(exception.InstanceNotRunning,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration)

    def test_live_migration_compute_src_not_exist(self):
        """Raise exception when src compute node does not exist."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        instance_id = instance['id']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        # Compute down
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndRaise(
                        exception.NotFound())

        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration)

    def test_live_migration_compute_src_not_alive(self):
        """Raise exception when src compute node is not alive."""

@@ -608,7 +606,9 @@ class SchedulerTestCase(test.TestCase):
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        # Compute down
        db.service_get_all_compute_by_host(self.context,
@@ -618,7 +618,7 @@ class SchedulerTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration)

    def test_live_migration_compute_dest_not_alive(self):
@@ -632,7 +632,9 @@ class SchedulerTestCase(test.TestCase):
        dest = 'fake_host2'
        block_migration = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
@@ -643,7 +645,7 @@ class SchedulerTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration)

    def test_live_migration_dest_check_service_same_host(self):
@@ -657,10 +659,12 @@ class SchedulerTestCase(test.TestCase):
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        instance_id = instance['id']
        # make dest same as src
        dest = instance['host']

        db.instance_get(self.context, instance['id']).AndReturn(instance)
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
@@ -670,7 +674,7 @@ class SchedulerTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.UnableToMigrateToSelf,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=False)
@@ -688,138 +692,29 @@ class SchedulerTestCase(test.TestCase):
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)

        self.driver._get_compute_info(self.context, dest,
                                      'memory_mb').AndReturn(2048)
        self.driver._get_compute_info(self.context, dest).AndReturn(
                {'memory_mb': 2048})
        db.instance_get_all_by_host(self.context, dest).AndReturn(
                [dict(memory_mb=1024), dict(memory_mb=512)])

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
        self.mox.StubOutWithMock(utils, 'service_is_up')
        self.mox.StubOutWithMock(self.driver,
                                 'assert_compute_node_has_enough_memory')
        self.mox.StubOutWithMock(self.driver, '_get_compute_info')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')

        dest = 'fake_host2'
        block_migration = True
        disk_over_commit = True
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        db.service_get_all_compute_by_host(self.context,
                dest).AndReturn(['fake_service3'])
        utils.service_is_up('fake_service3').AndReturn(True)

        # Enough memory
        self.driver.assert_compute_node_has_enough_memory(self.context,
                                                          instance, dest)

        # Not enough disk
        self.driver._get_compute_info(self.context, dest,
                                      'disk_available_least').AndReturn(1023)
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          instance['host']).AndReturn('src_queue')
        instance_disk_info_msg = {
            'method': 'get_instance_disk_info',
            'args': {
                'instance_name': instance['name'],
            },
            'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION,
        }
        instance_disk_info = [{'disk_size': 1024 * (1024 ** 3)}]
        rpc.call(self.context,
                 'src_queue',
                 instance_disk_info_msg,
                 None).AndReturn(jsonutils.dumps(instance_disk_info))

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def test_live_migration_different_shared_storage_raises(self):
        """Src and dest must have same shared storage for live migration"""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, False)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def test_live_migration_same_shared_storage_okay(self):
        """live migration works with same src and dest shared storage"""

        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, False)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def test_live_migration_different_hypervisor_type_raises(self):
        """Confirm live_migration to hypervisor of different type raises"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
@@ -832,18 +727,16 @@ class SchedulerTestCase(test.TestCase):
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, True)
        self.driver._live_migration_dest_check(self.context, instance, dest)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # different hypervisor type
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'not-xen',
@@ -852,11 +745,12 @@ class SchedulerTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidHypervisorType,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def test_live_migration_dest_hypervisor_version_older_raises(self):
        """Confirm live migration to older hypervisor raises"""
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
@@ -869,18 +763,16 @@ class SchedulerTestCase(test.TestCase):
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)
        instance_id = instance['id']
        db.instance_get(self.context,
                        instance_id).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, True)
        self.driver._live_migration_dest_check(self.context, instance, dest)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        # newer hypervisor version for src
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
@@ -888,53 +780,10 @@ class SchedulerTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.DestinationHypervisorTooOld,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          instance_id=instance_id, dest=dest,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)

    def test_live_migration_dest_host_incompatable_cpu_raises(self):
        self.mox.StubOutWithMock(db, 'instance_get')
        self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
        self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'call')
        self.mox.StubOutWithMock(rpc, 'cast')
        self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')

        dest = 'fake_host2'
        block_migration = False
        disk_over_commit = False
        instance = self._live_migration_instance()
        db.instance_get(self.context, instance['id']).AndReturn(instance)

        self.driver._live_migration_src_check(self.context, instance)
        self.driver._live_migration_dest_check(self.context, instance,
                dest, block_migration, disk_over_commit)

        self._check_shared_storage(dest, instance, True)

        db.service_get_all_compute_by_host(self.context, dest).AndReturn(
                [{'compute_node': [{'hypervisor_type': 'xen',
                                    'hypervisor_version': 1}]}])
        db.service_get_all_compute_by_host(self.context,
                instance['host']).AndReturn(
                        [{'compute_node': [{'hypervisor_type': 'xen',
                                            'hypervisor_version': 1,
                                            'cpu_info': 'fake_cpu_info'}]}])
        rpc.queue_get_for(self.context, FLAGS.compute_topic,
                          dest).AndReturn('dest_queue')
        rpc.call(self.context, 'dest_queue',
                 {'method': 'compare_cpu',
                  'args': {'cpu_info': 'fake_cpu_info'},
                  'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION},
                 None).AndRaise(rpc_common.RemoteError())

        self.mox.ReplayAll()
        self.assertRaises(rpc_common.RemoteError,
                          self.driver.schedule_live_migration, self.context,
                          instance_id=instance['id'], dest=dest,
                          block_migration=block_migration)


class SchedulerDriverBaseTestCase(SchedulerTestCase):
    """Test cases for base scheduler driver class methods
@@ -30,6 +30,8 @@ from xml.dom import minidom
from nova.api.ec2 import cloud
from nova.compute import instance_types
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import db
@@ -1483,6 +1485,166 @@ class LibvirtConnTestCase(test.TestCase):

        db.instance_destroy(self.context, instance_ref['uuid'])

    def test_check_can_live_migrate_dest_all_pass_with_block_migration(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest = "fake_host_2"
        src = instance_ref['host']
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, '_get_compute_info')
        self.mox.StubOutWithMock(conn, 'get_instance_disk_info')
        self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
        self.mox.StubOutWithMock(conn, '_compare_cpu')

        conn._get_compute_info(self.context, FLAGS.host).AndReturn(
                {'disk_available_least': 400})
        conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
                '[{"virt_disk_size":2}]')
        # _check_cpu_match
        conn._get_compute_info(self.context,
                               src).AndReturn({'cpu_info': "asdf"})
        conn._compare_cpu("asdf")

        # mounted_on_same_shared_storage
        filename = "file"
        conn._create_shared_storage_test_file().AndReturn(filename)

        self.mox.ReplayAll()
        return_value = conn.check_can_live_migrate_destination(self.context,
                instance_ref, True, False)
        self.assertDictMatch(return_value,
                             {"filename": "file", "block_migration": True})

    def test_check_can_live_migrate_dest_all_pass_no_block_migration(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest = "fake_host_2"
        src = instance_ref['host']
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, '_get_compute_info')
        self.mox.StubOutWithMock(conn, '_create_shared_storage_test_file')
        self.mox.StubOutWithMock(conn, '_compare_cpu')

        # _check_cpu_match
        conn._get_compute_info(self.context,
                               src).AndReturn({'cpu_info': "asdf"})
        conn._compare_cpu("asdf")

        # mounted_on_same_shared_storage
        filename = "file"
        conn._create_shared_storage_test_file().AndReturn(filename)

        self.mox.ReplayAll()
        return_value = conn.check_can_live_migrate_destination(self.context,
                instance_ref, False, False)
        self.assertDictMatch(return_value,
                             {"filename": "file", "block_migration": False})

    def test_check_can_live_migrate_dest_fails_not_enough_disk(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest = "fake_host_2"
        src = instance_ref['host']
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, '_get_compute_info')
        self.mox.StubOutWithMock(conn, 'get_instance_disk_info')

        conn._get_compute_info(self.context, FLAGS.host).AndReturn(
                {'disk_available_least': 0})
        conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
                '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          conn.check_can_live_migrate_destination,
                          self.context, instance_ref, True, False)

    def test_check_can_live_migrate_dest_incompatible_cpu_raises(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest = "fake_host_2"
        src = instance_ref['host']
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, '_get_compute_info')
        self.mox.StubOutWithMock(conn, '_compare_cpu')

        conn._get_compute_info(self.context, src).AndReturn(
                {'cpu_info': "asdf"})
        conn._compare_cpu("asdf").AndRaise(exception.InvalidCPUInfo)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidCPUInfo,
                          conn.check_can_live_migrate_destination,
                          self.context, instance_ref, False, False)

    def test_check_can_live_migrate_dest_fail_space_with_block_migration(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest = "fake_host_2"
        src = instance_ref['host']
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, '_get_compute_info')
        self.mox.StubOutWithMock(conn, 'get_instance_disk_info')

        conn._get_compute_info(self.context, FLAGS.host).AndReturn(
                {'disk_available_least': 0})
        conn.get_instance_disk_info(instance_ref["name"]).AndReturn(
                '[{"virt_disk_size":2}]')

        self.mox.ReplayAll()
        self.assertRaises(exception.MigrationError,
                          conn.check_can_live_migrate_destination,
                          self.context, instance_ref, True, False)

    def test_check_can_live_migrate_dest_cleanup_works_correctly(self):
        dest_check_data = {"filename": "file", "block_migration": True}
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, '_cleanup_shared_storage_test_file')
        conn._cleanup_shared_storage_test_file("file")

        self.mox.ReplayAll()
        conn.check_can_live_migrate_destination_cleanup(self.context,
                                                        dest_check_data)

    def test_check_can_live_migrate_source_works_correctly(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest_check_data = {"filename": "file", "block_migration": True}
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
        conn._check_shared_storage_test_file("file").AndReturn(False)

        self.mox.ReplayAll()
        conn.check_can_live_migrate_source(self.context, instance_ref,
                                           dest_check_data)

    def test_check_can_live_migrate_dest_fail_shared_storage_with_blockm(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest_check_data = {"filename": "file", "block_migration": True}
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
        conn._check_shared_storage_test_file("file").AndReturn(True)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          conn.check_can_live_migrate_source,
                          self.context, instance_ref, dest_check_data)

    def test_check_can_live_migrate_no_shared_storage_no_blck_mig_raises(self):
        instance_ref = db.instance_create(self.context, self.test_instance)
        dest_check_data = {"filename": "file", "block_migration": False}
        conn = libvirt_driver.LibvirtDriver(False)

        self.mox.StubOutWithMock(conn, "_check_shared_storage_test_file")
        conn._check_shared_storage_test_file("file").AndReturn(False)

        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidSharedStorage,
                          conn.check_can_live_migrate_source,
                          self.context, instance_ref, dest_check_data)

    def test_live_migration_raises_exception(self):
        """Confirms recover method is called when exceptions are raised."""
        # Preparing data
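Read together, the mocked expectations above pin down the control flow of the new LibvirtDriver.check_can_live_migrate_destination. A sketch assembled from those expectations only (the actual driver body may differ; `driver` stands in for the LibvirtDriver instance and `local_host` for FLAGS.host):

    import json

    def check_can_live_migrate_destination(driver, ctxt, instance_ref,
                                           block_migration, disk_over_commit,
                                           local_host):
        if block_migration:
            # Destination-side disk headroom check; raises
            # exception.MigrationError when it comes up short.
            available = driver._get_compute_info(
                    ctxt, local_host)['disk_available_least']
            disk_infos = json.loads(
                    driver.get_instance_disk_info(instance_ref['name']))
        # The source CPU must be runnable on this host; _compare_cpu
        # raises exception.InvalidCPUInfo on a mismatch.
        cpu_info = driver._get_compute_info(ctxt,
                                            instance_ref['host'])['cpu_info']
        driver._compare_cpu(cpu_info)
        # Drop a probe file so check_can_live_migrate_source on the source
        # host can test for shared storage.
        filename = driver._create_shared_storage_test_file()
        return {'filename': filename, 'block_migration': block_migration}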
@@ -1535,7 +1697,7 @@ class LibvirtConnTestCase(test.TestCase):
        db.volume_destroy(self.context, volume_ref['id'])
        db.instance_destroy(self.context, instance_ref['uuid'])

    def test_pre_live_migration_works_correctly(self):
    def test_pre_live_migration_works_correctly_mocked(self):
        """Confirms pre_live_migration works correctly."""
        # Creating testdata
        vol = {'block_device_mapping': [
@@ -1543,6 +1705,14 @@ class LibvirtConnTestCase(test.TestCase):
                  {'connection_info': 'dummy', 'mount_device': '/dev/sdb'}]}
        conn = libvirt_driver.LibvirtDriver(False)

        class FakeNetworkInfo():
            def fixed_ips(self):
                return ["test_ip_addr"]

        inst_ref = {'id': 'foo'}
        c = context.get_admin_context()
        nw_info = FakeNetworkInfo()

        # Creating mocks
        self.mox.StubOutWithMock(driver, "block_device_info_get_mapping")
        driver.block_device_info_get_mapping(vol
@@ -1552,10 +1722,12 @@ class LibvirtConnTestCase(test.TestCase):
            conn.volume_driver_method('connect_volume',
                                      v['connection_info'],
                                      v['mount_device'].rpartition("/")[2])
        self.mox.StubOutWithMock(conn, 'plug_vifs')
        conn.plug_vifs(mox.IsA(inst_ref), nw_info)

        # Starting test
        self.mox.ReplayAll()
        self.assertEqual(conn.pre_live_migration(vol), None)
        result = conn.pre_live_migration(c, inst_ref, vol, nw_info)
        self.assertEqual(result, None)

    def test_pre_block_migration_works_correctly(self):
        """Confirms pre_block_migration works correctly."""
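The renamed test above also fixes the call shape: pre_live_migration now receives the context, the instance, the block device info and the network info, and is expected to plug VIFs on the destination before the guest arrives. Sketched from the mocked expectations (not the actual driver body):

    def pre_live_migration(self, context, instance, block_device_info,
                           network_info):
        """Destination-side preparation: attach every mapped volume, then
        plug the instance's VIFs so its addresses work on this host."""
        for vol in block_device_info['block_device_mapping']:
            self.volume_driver_method('connect_volume',
                                      vol['connection_info'],
                                      vol['mount_device'].rpartition("/")[2])
        self.plug_vifs(instance, network_info)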
@@ -455,32 +455,6 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
        instance_ref, network_info = self._get_running_instance()
        self.connection.refresh_provider_fw_rules()

    @catch_notimplementederror
    def test_compare_cpu(self):
        cpu_info = '''{ "topology": {
                            "sockets": 1,
                            "cores": 2,
                            "threads": 1 },
                        "features": [
                            "xtpr",
                            "tm2",
                            "est",
                            "vmx",
                            "ds_cpl",
                            "monitor",
                            "pbe",
                            "tm",
                            "ht",
                            "ss",
                            "acpi",
                            "ds",
                            "vme"],
                        "arch": "x86_64",
                        "model": "Penryn",
                        "vendor": "Intel" }'''

        self.connection.compare_cpu(cpu_info)

    @catch_notimplementederror
    def test_ensure_filtering_for_instance(self):
        instance_ref = test_utils.get_test_instance()