Adding the kvm-block-migration feature.
I wrote a description at the URL below; I hope it helps with reviewing. <http://etherpad.openstack.org/kvm-block-migration>
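The change threads a block_migration flag through the whole call path (nova-manage, then the scheduler driver checks, then the compute manager, then the libvirt connection). As a rough usage sketch based on the new VmCommands methods in this diff (the instance ID and destination host are placeholders, and the 'vm' category name is an assumption based on the VmCommands class name):

    # Live migration (instance disks must be on shared storage)
    nova-manage vm live_migration --ec2_id i-00000001 --dest desthost

    # Block migration (no shared storage; empty disks are created on the
    # destination and copied via the VIR_MIGRATE_NON_SHARED_INC libvirt flag)
    nova-manage vm block_migration --ec2_id i-00000001 --dest desthost

Both commands delegate to the shared _migration() helper; block_migration simply passes block_migration=True down the RPC chain.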
Authors
@@ -59,6 +59,7 @@ Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Justin Shepherd <jshepher@rackspace.com>
Kei Masumoto <masumotok@nttdata.co.jp>
masumoto<masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
@@ -834,11 +834,13 @@ class VmCommands(object):
                                      instance['availability_zone'],
                                      instance['launch_index'])

    @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
    @args('--dest', dest='dest', metavar='<Destination>',
          help='destination node')
    def live_migration(self, ec2_id, dest):
        """Migrates a running instance to a new machine."""
    def _migration(self, ec2_id, dest, block_migration=False):
        """Migrates a running instance to a new machine.
        :param ec2_id: instance id which comes from euca-describe-instance.
        :param dest: destination host name.
        :param block_migration: if True, do block_migration.

        """

        ctxt = context.get_admin_context()
        instance_id = ec2utils.ec2_id_to_id(ec2_id)
@@ -859,11 +861,28 @@ class VmCommands(object):
                 {"method": "live_migration",
                  "args": {"instance_id": instance_id,
                           "dest": dest,
                           "topic": FLAGS.compute_topic}})
                           "topic": FLAGS.compute_topic,
                           "block_migration": block_migration}})

        print _('Migration of %s initiated.'
                'Check its progress using euca-describe-instances.') % ec2_id

    @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
    @args('--dest', dest='dest', metavar='<Destination>',
          help='destination node')
    def live_migration(self, ec2_id, dest):
        """Migrates a running instance to a new machine."""

        self._migration(ec2_id, dest)

    @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
    @args('--dest', dest='dest', metavar='<Destination>',
          help='destination node')
    def block_migration(self, ec2_id, dest):
        """Migrates a running instance to a new machine with storage data."""

        self._migration(ec2_id, dest, True)


class ServiceCommands(object):
    """Enable and disable running services"""
@@ -945,9 +964,19 @@ class ServiceCommands(object):
        mem_u = result['resource']['memory_mb_used']
        hdd_u = result['resource']['local_gb_used']

        cpu_sum = 0
        mem_sum = 0
        hdd_sum = 0
        print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
        print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
        print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
        print '%s(used_now)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
        for p_id, val in result['usage'].items():
            cpu_sum += val['vcpus']
            mem_sum += val['memory_mb']
            hdd_sum += val['local_gb']
        print '%s(used_max)\t\t\t%s\t%s\t%s' % (host, cpu_sum,
                                                mem_sum, hdd_sum)

        for p_id, val in result['usage'].items():
            print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
                                              p_id,

@@ -1226,6 +1226,7 @@ class ComputeManager(manager.SchedulerDependentManager):
    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def check_shared_storage_test_file(self, context, filename):
        """Confirms existence of the tmpfile under FLAGS.instances_path.
        Returns False if it cannot confirm the tmpfile.

        :param context: security context
        :param filename: confirm existence of FLAGS.instances_path/thisfile
@@ -1233,7 +1234,9 @@ class ComputeManager(manager.SchedulerDependentManager):
        """
        tmp_file = os.path.join(FLAGS.instances_path, filename)
        if not os.path.exists(tmp_file):
            raise exception.FileNotFound(file_path=tmp_file)
            return False
        else:
            return True

    @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
    def cleanup_shared_storage_test_file(self, context, filename):
@@ -1256,11 +1259,13 @@ class ComputeManager(manager.SchedulerDependentManager):
        """
        return self.driver.update_available_resource(context, self.host)

    def pre_live_migration(self, context, instance_id, time=None):
    def pre_live_migration(self, context, instance_id, time=None,
                           block_migration=False, disk=None):
        """Preparations for live migration at dest host.

        :param context: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
        :param block_migration: if true, prepare for block migration

        """
        if not time:
@@ -1312,17 +1317,24 @@ class ComputeManager(manager.SchedulerDependentManager):
        # onto destination host.
        self.driver.ensure_filtering_rules_for_instance(instance_ref, network_info)

    def live_migration(self, context, instance_id, dest):
        # Preparation for block migration
        if block_migration:
            self.driver.pre_block_migration(context,
                                            instance_ref,
                                            disk)

    def live_migration(self, context, instance_id,
                       dest, block_migration=False):
        """Executing live migration.

        :param context: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
        :param dest: destination host
        :param block_migration: if true, do block migration

        """
        # Get instance for error handling.
        instance_ref = self.db.instance_get(context, instance_id)
        i_name = instance_ref.name

        try:
            # Checking volume node is working correctly when any volumes
@@ -1333,16 +1345,25 @@ class ComputeManager(manager.SchedulerDependentManager):
                     {"method": "check_for_export",
                      "args": {'instance_id': instance_id}})

            # Asking dest host to prepare for live migration.
            if block_migration:
                disk = self.driver.get_instance_disk_info(context,
                                                          instance_ref)
            else:
                disk = None

            rpc.call(context,
                     self.db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": "pre_live_migration",
                      "args": {'instance_id': instance_id}})
                      "args": {'instance_id': instance_id,
                               'block_migration': block_migration,
                               'disk': disk}})

        except Exception:
            i_name = instance_ref.name
            msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
            LOG.error(msg % locals())
            self.recover_live_migration(context, instance_ref)
            self.rollback_live_migration(context, instance_ref,
                                         dest, block_migration)
            raise

        # Executing live migration
@@ -1350,9 +1371,11 @@ class ComputeManager(manager.SchedulerDependentManager):
        # nothing must be recovered in this version.
        self.driver.live_migration(context, instance_ref, dest,
                                   self.post_live_migration,
                                   self.recover_live_migration)
                                   self.rollback_live_migration,
                                   block_migration)

    def post_live_migration(self, ctxt, instance_ref, dest):
    def post_live_migration(self, ctxt, instance_ref,
                            dest, block_migration=False):
        """Post operations for live migration.

        This method is called from live_migration
@@ -1361,6 +1384,7 @@ class ComputeManager(manager.SchedulerDependentManager):
        :param ctxt: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
        :param dest: destination host
        :param block_migration: if true, do block migration

        """

@@ -1403,8 +1427,29 @@ class ComputeManager(manager.SchedulerDependentManager):
                           "%(i_name)s cannot inherit floating "
                           "ip.\n%(e)s") % (locals()))

        # Restore instance/volume state
        self.recover_live_migration(ctxt, instance_ref, dest)
        # Define domain at destination host, without doing it,
        # pause/suspend/terminate do not work.
        rpc.call(ctxt,
                 self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
                 {"method": "post_live_migration_at_destination",
                  "args": {'instance_id': instance_ref.id,
                           'block_migration': block_migration}})

        # Restore instance state
        self.db.instance_update(ctxt,
                                instance_ref['id'],
                                {'state_description': 'running',
                                 'state': power_state.RUNNING,
                                 'host': dest})
        # Restore volume state
        for volume_ref in instance_ref['volumes']:
            volume_id = volume_ref['id']
            self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})

        # No instance booting at source host, but instance dir
        # must be deleted for preparing next block migration
        if block_migration:
            self.driver.destroy(instance_ref, network_info)

        LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
                 % locals())
@@ -1412,31 +1457,64 @@ class ComputeManager(manager.SchedulerDependentManager):
                     "Domain not found: no domain with matching name.\" "
                     "This error can be safely ignored."))

    def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None):
        """Recovers Instance/volume state from migrating -> running.
    def post_live_migration_at_destination(self, context,
                                           instance_id, block_migration=False):
        """Post operations for live migration.

        :param ctxt: security context
        :param context: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
        :param host: DB column value is updated by this hostname.
                     If none, the host instance currently running is selected.
        :param block_migration: block_migration

        """
        if not host:
            host = instance_ref['host']
        instance_ref = self.db.instance_get(context, instance_id)
        LOG.info(_('Post operation of migration started for %s .')
                 % instance_ref.name)
        network_info = self._get_instance_nw_info(context, instance_ref)
        self.driver.post_live_migration_at_destination(context,
                                                       instance_ref,
                                                       network_info,
                                                       block_migration)

        self.db.instance_update(ctxt,
    def rollback_live_migration(self, context, instance_ref,
                                dest, block_migration):
        """Recovers Instance/volume state from migrating -> running.

        :param context: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
        :param dest:
            This method is called from live migration src host.
            This param specifies destination host.
        """
        host = instance_ref['host']
        self.db.instance_update(context,
                                instance_ref['id'],
                                {'state_description': 'running',
                                 'state': power_state.RUNNING,
                                 'host': host})

        if dest:
            volume_api = volume.API()
        for volume_ref in instance_ref['volumes']:
            volume_id = volume_ref['id']
            self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
            if dest:
                volume_api.remove_from_compute(ctxt, volume_id, dest)
            self.db.volume_update(context, volume_id, {'status': 'in-use'})
            volume.API().remove_from_compute(context, volume_id, dest)

        # Block migration needs empty image at destination host
        # before migration starts, so if any failure occurs,
        # any empty images have to be deleted.
        if block_migration:
            rpc.cast(context,
                     self.db.queue_get_for(context, FLAGS.compute_topic, dest),
                     {"method": "rollback_live_migration_at_destination",
                      "args": {'instance_id': instance_ref['id']}})

    def rollback_live_migration_at_destination(self, context, instance_id):
        """Cleaning up image directory that is created by pre_live_migration.

        :param context: security context
        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
        """
        instances_ref = self.db.instance_get(context, instance_id)
        network_info = self._get_instance_nw_info(context, instances_ref)
        self.driver.destroy(instances_ref, network_info)

    def periodic_tasks(self, context=None):
        """Tasks to be run at a periodic interval."""

@@ -570,27 +570,6 @@ def instance_add_security_group(context, instance_id, security_group_id):
                                           security_group_id)


def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
    """Get instances.vcpus by host and project."""
    return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
                                                          hostname,
                                                          proj_id)


def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
    """Get amount of memory by host and project."""
    return IMPL.instance_get_memory_sum_by_host_and_project(context,
                                                            hostname,
                                                            proj_id)


def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
    """Get total amount of disk by host and project."""
    return IMPL.instance_get_disk_sum_by_host_and_project(context,
                                                          hostname,
                                                          proj_id)


def instance_action_create(context, values):
    """Create an instance action from the values dictionary."""
    return IMPL.instance_action_create(context, values)

@@ -1482,45 +1482,6 @@ def instance_add_security_group(context, instance_id, security_group_id):
    instance_ref.save(session=session)


@require_context
def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
    session = get_session()
    result = session.query(models.Instance).\
                      filter_by(host=hostname).\
                      filter_by(project_id=proj_id).\
                      filter_by(deleted=False).\
                      value(func.sum(models.Instance.vcpus))
    if not result:
        return 0
    return result


@require_context
def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
    session = get_session()
    result = session.query(models.Instance).\
                      filter_by(host=hostname).\
                      filter_by(project_id=proj_id).\
                      filter_by(deleted=False).\
                      value(func.sum(models.Instance.memory_mb))
    if not result:
        return 0
    return result


@require_context
def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
    session = get_session()
    result = session.query(models.Instance).\
                      filter_by(host=hostname).\
                      filter_by(project_id=proj_id).\
                      filter_by(deleted=False).\
                      value(func.sum(models.Instance.local_gb))
    if not result:
        return 0
    return result


@require_context
def instance_action_create(context, values):
    """Create an instance action from the values dictionary."""

@@ -127,14 +127,14 @@ class ComputeNode(BASE, NovaBase):
                         'ComputeNode.service_id == Service.id,'
                         'ComputeNode.deleted == False)')

    vcpus = Column(Integer, nullable=True)
    memory_mb = Column(Integer, nullable=True)
    local_gb = Column(Integer, nullable=True)
    vcpus_used = Column(Integer, nullable=True)
    memory_mb_used = Column(Integer, nullable=True)
    local_gb_used = Column(Integer, nullable=True)
    hypervisor_type = Column(Text, nullable=True)
    hypervisor_version = Column(Integer, nullable=True)
    vcpus = Column(Integer)
    memory_mb = Column(Integer)
    local_gb = Column(Integer)
    vcpus_used = Column(Integer)
    memory_mb_used = Column(Integer)
    local_gb_used = Column(Integer)
    hypervisor_type = Column(Text)
    hypervisor_version = Column(Integer)

    # Note(masumotok): Expected Strings example:
    #

@@ -273,6 +273,11 @@ class DestinationHypervisorTooOld(Invalid):
                "has been provided.")


class DestinationDiskExists(Invalid):
    message = _("The supplied disk path (%(path)s) already exists, "
                "it is expected not to exist.")


class InvalidDevicePath(Invalid):
    message = _("The supplied device path (%(path)s) is invalid.")

@@ -699,6 +704,10 @@ class InstanceExists(Duplicate):
    message = _("Instance %(name)s already exists.")


class InvalidSharedStorage(NovaException):
    message = _("%(path)s is on shared storage: %(reason)s")


class MigrationError(NovaException):
    message = _("Migration error") + ": %(reason)s"


@@ -30,6 +30,7 @@ from nova import log as logging
from nova import rpc
from nova import utils
from nova.compute import power_state
from nova.api.ec2 import ec2utils


FLAGS = flags.FLAGS
@@ -78,7 +79,8 @@ class Scheduler(object):
        """Must override at least this method for scheduler to work."""
        raise NotImplementedError(_("Must implement a fallback schedule"))

    def schedule_live_migration(self, context, instance_id, dest):
    def schedule_live_migration(self, context, instance_id, dest,
                                block_migration=False):
        """Live migration scheduling method.

        :param context:
@@ -87,9 +89,7 @@ class Scheduler(object):
        :return:
            The host where the instance is currently running.
            The scheduler then sends a request to that host.

        """

        # Whether instance exists and is running.
        instance_ref = db.instance_get(context, instance_id)

@@ -97,10 +97,11 @@ class Scheduler(object):
        self._live_migration_src_check(context, instance_ref)

        # Checking destination host.
        self._live_migration_dest_check(context, instance_ref, dest)

        self._live_migration_dest_check(context, instance_ref,
                                        dest, block_migration)
        # Common checking.
        self._live_migration_common_check(context, instance_ref, dest)
        self._live_migration_common_check(context, instance_ref,
                                          dest, block_migration)

        # Changing instance_state.
        db.instance_set_state(context,
@@ -130,7 +131,8 @@ class Scheduler(object):
        # Checking instance is running.
        if (power_state.RUNNING != instance_ref['state'] or \
           'running' != instance_ref['state_description']):
            raise exception.InstanceNotRunning(instance_id=instance_ref['id'])
            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
            raise exception.InstanceNotRunning(instance_id=instance_id)

        # Checking volume node is running when any volumes are mounted
        # to the instance.
@@ -147,7 +149,8 @@ class Scheduler(object):
        if not self.service_is_up(services[0]):
            raise exception.ComputeServiceUnavailable(host=src)

    def _live_migration_dest_check(self, context, instance_ref, dest):
    def _live_migration_dest_check(self, context, instance_ref, dest,
                                   block_migration):
        """Live migration check routine (for destination host).

        :param context: security context
@@ -168,16 +171,18 @@ class Scheduler(object):
        # and dest is not same.
        src = instance_ref['host']
        if dest == src:
            raise exception.UnableToMigrateToSelf(
                    instance_id=instance_ref['id'],
                    host=dest)
            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
            raise exception.UnableToMigrateToSelf(instance_id=instance_id,
                                                  host=dest)

        # Checking dst host still has enough capacities.
        self.assert_compute_node_has_enough_resources(context,
                                                      instance_ref,
                                                      dest)
                                                      dest,
                                                      block_migration)

    def _live_migration_common_check(self, context, instance_ref, dest):
    def _live_migration_common_check(self, context, instance_ref, dest,
                                     block_migration):
        """Live migration common check routine.

        The checks below follow
@@ -186,11 +191,26 @@ class Scheduler(object):
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, check for block_migration.

        """

        # Checking shared storage connectivity
        self.mounted_on_same_shared_storage(context, instance_ref, dest)
        # if block migration, instances_path should not be on shared storage.
        try:
            self.mounted_on_same_shared_storage(context, instance_ref, dest)
            if block_migration:
                reason = _("Block migration can not be used "
                           "with shared storage.")
                raise exception.InvalidSharedStorage(reason=reason, path=dest)
        except exception.FileNotFound:
            if not block_migration:
                src = instance_ref['host']
                ipath = FLAGS.instances_path
                logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                                "same shared storage between %(src)s "
                                "and %(dest)s.") % locals())
                raise

        # Checking dest exists.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
@@ -229,14 +249,26 @@ class Scheduler(object):
                                  "original host %(src)s.") % locals())
                raise

    def assert_compute_node_has_enough_resources(self, context,
                                                 instance_ref, dest):
    def assert_compute_node_has_enough_resources(self, context, instance_ref,
                                                 dest, block_migration):

        """Checks if destination host has enough resources for live migration.

        Currently, only memory checking has been done.
        If storage migration (block migration, meaning live migration
        without any shared storage) becomes available, local storage
        checking will also be necessary.
        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host
        :param block_migration: if True, disk checking has been done

        """
        self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
        if not block_migration:
            return
        self.assert_compute_node_has_enough_disk(context, instance_ref, dest)

    def assert_compute_node_has_enough_memory(self, context,
                                              instance_ref, dest):
        """Checks if destination host has enough memory for live migration.


        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
@@ -244,23 +276,70 @@ class Scheduler(object):

        """

        # Getting instance information
        hostname = instance_ref['hostname']
        # Getting total available memory and disk of host
        avail = self._get_compute_info(context, dest, 'memory_mb')

        # Getting host information
        service_refs = db.service_get_all_compute_by_host(context, dest)
        compute_node_ref = service_refs[0]['compute_node'][0]
        # Getting total used memory and disk of host
        # It should be sum of memories that are assigned as max value,
        # because overcommitting is risky.
        used = 0
        instance_refs = db.instance_get_all_by_host(context, dest)
        used_list = [i['memory_mb'] for i in instance_refs]
        if used_list:
            used = reduce(lambda x, y: x + y, used_list)

        mem_total = int(compute_node_ref['memory_mb'])
        mem_used = int(compute_node_ref['memory_mb_used'])
        mem_avail = mem_total - mem_used
        mem_inst = instance_ref['memory_mb']
        if mem_avail <= mem_inst:
            reason = _("Unable to migrate %(hostname)s to destination: "
                       "%(dest)s (host:%(mem_avail)s <= instance:"
                       "%(mem_inst)s)")
        avail = avail - used
        if avail <= mem_inst:
            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
            reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                       "Lack of memory(host:%(avail)s <= instance:%(mem_inst)s)")
            raise exception.MigrationError(reason=reason % locals())

    def assert_compute_node_has_enough_disk(self, context,
                                            instance_ref, dest):
        """Checks if destination host has enough disk for block migration.

        :param context: security context
        :param instance_ref: nova.db.sqlalchemy.models.Instance object
        :param dest: destination host

        """

        # Getting total available memory and disk of host
        avail = self._get_compute_info(context, dest, 'local_gb')

        # Getting total used memory and disk of host
        # It should be sum of disks that are assigned as max value
        # because overcommitting is risky.
        used = 0
        instance_refs = db.instance_get_all_by_host(context, dest)
        used_list = [i['local_gb'] for i in instance_refs]
        if used_list:
            used = reduce(lambda x, y: x + y, used_list)

        disk_inst = instance_ref['local_gb']
        avail = avail - used
        if avail <= disk_inst:
            instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
            reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
                       "Lack of disk(host:%(avail)s "
                       "<= instance:%(disk_inst)s)")
            raise exception.MigrationError(reason=reason % locals())

    def _get_compute_info(self, context, host, key):
        """Get compute node's information specified by key.

        :param context: security context
        :param host: hostname (must be compute node)
        :param key: column name of compute_nodes
        :return: value specified by key

        """
        compute_node_ref = db.service_get_all_compute_by_host(context, host)
        compute_node_ref = compute_node_ref[0]['compute_node'][0]
        return compute_node_ref[key]

    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
        """Check if the src and dest host mount same shared storage.

@@ -283,15 +362,13 @@ class Scheduler(object):
                               {"method": 'create_shared_storage_test_file'})

            # make sure existence at src host.
            rpc.call(context, src_t,
                     {"method": 'check_shared_storage_test_file',
                      "args": {'filename': filename}})
            ret = rpc.call(context, src_t,
                           {"method": 'check_shared_storage_test_file',
                            "args": {'filename': filename}})
            if not ret:
                raise exception.FileNotFound(file_path=filename)

        except rpc.RemoteError:
            ipath = FLAGS.instances_path
            logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                            "same shared storage between %(src)s "
                            "and %(dest)s.") % locals())
        except exception.FileNotFound:
            raise

        finally:

@@ -114,7 +114,7 @@ class SchedulerManager(manager.Manager):
    # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
    #                    Based on bexar design summit discussion,
    #                    just put this here for bexar release.
    def show_host_resources(self, context, host, *args):
    def show_host_resources(self, context, host):
        """Shows the physical/usage resource given by hosts.

        :param context: security context
@@ -122,43 +122,45 @@ class SchedulerManager(manager.Manager):
        :returns:
            example format is below.
            {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
            D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
            D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048,
                'vcpus_used': 12, 'memory_mb_used': 10240,
                'local_gb_used': 64}

        """

        # Getting compute node info and related instances info
        compute_ref = db.service_get_all_compute_by_host(context, host)
        compute_ref = compute_ref[0]

        # Getting physical resource information
        compute_node_ref = compute_ref['compute_node'][0]
        resource = {'vcpus': compute_node_ref['vcpus'],
                    'memory_mb': compute_node_ref['memory_mb'],
                    'local_gb': compute_node_ref['local_gb'],
                    'vcpus_used': compute_node_ref['vcpus_used'],
                    'memory_mb_used': compute_node_ref['memory_mb_used'],
                    'local_gb_used': compute_node_ref['local_gb_used']}

        # Getting usage resource information
        usage = {}
        instance_refs = db.instance_get_all_by_host(context,
                                                    compute_ref['host'])

        # Getting total available/used resource
        compute_ref = compute_ref['compute_node'][0]
        resource = {'vcpus': compute_ref['vcpus'],
                    'memory_mb': compute_ref['memory_mb'],
                    'local_gb': compute_ref['local_gb'],
                    'vcpus_used': compute_ref['vcpus_used'],
                    'memory_mb_used': compute_ref['memory_mb_used'],
                    'local_gb_used': compute_ref['local_gb_used']}
        usage = dict()
        if not instance_refs:
            return {'resource': resource, 'usage': usage}

        # Getting usage resource per project
        project_ids = [i['project_id'] for i in instance_refs]
        project_ids = list(set(project_ids))
        for project_id in project_ids:
            vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
                                                                 host,
                                                                 project_id)
            mem = db.instance_get_memory_sum_by_host_and_project(context,
                                                                 host,
                                                                 project_id)
            hdd = db.instance_get_disk_sum_by_host_and_project(context,
                                                               host,
                                                               project_id)
            usage[project_id] = {'vcpus': int(vcpus),
                                 'memory_mb': int(mem),
                                 'local_gb': int(hdd)}
            vcpus = [i['vcpus'] for i in instance_refs \
                if i['project_id'] == project_id]

            mem = [i['memory_mb'] for i in instance_refs \
                if i['project_id'] == project_id]

            disk = [i['local_gb'] for i in instance_refs \
                if i['project_id'] == project_id]

            usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus),
                                 'memory_mb': reduce(lambda x, y: x + y, mem),
                                 'local_gb': reduce(lambda x, y: x + y, disk)}

        return {'resource': resource, 'usage': usage}

@@ -127,9 +127,7 @@ class ExtensionControllerTest(test.TestCase):
                "updated": "2011-01-22T13:25:27-06:00",
                "description": "The Fox In Socks Extension",
                "alias": "FOXNSOX",
                "links": [],
            },
        )
                "links": []})

    def test_list_extensions_xml(self):
        app = openstack.APIRouterV11()
@@ -336,27 +334,18 @@ class ExtensionsXMLSerializerTest(test.TestCase):

    def test_serialize_extenstion(self):
        serializer = extensions.ExtensionsXMLSerializer()
        data = {
            'extension': {
                'name': 'ext1',
                'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
                'alias': 'RS-PIE',
                'updated': '2011-01-22T13:25:27-06:00',
                'description': 'Adds the capability to share an image.',
                'links': [
                    {
                        'rel': 'describedby',
                        'type': 'application/pdf',
                        'href': 'http://docs.rack.com/servers/api/ext/cs.pdf',
                    },
                    {
                        'rel': 'describedby',
                        'type': 'application/vnd.sun.wadl+xml',
                        'href': 'http://docs.rack.com/servers/api/ext/cs.wadl',
                    },
                ],
            },
        }
        data = {'extension': {
                'name': 'ext1',
                'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0',
                'alias': 'RS-PIE',
                'updated': '2011-01-22T13:25:27-06:00',
                'description': 'Adds the capability to share an image.',
                'links': [{'rel': 'describedby',
                           'type': 'application/pdf',
                           'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'},
                          {'rel': 'describedby',
                           'type': 'application/vnd.sun.wadl+xml',
                           'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}}

        xml = serializer.serialize(data, 'show')
        print xml
@@ -378,48 +367,30 @@ class ExtensionsXMLSerializerTest(test.TestCase):

    def test_serialize_extensions(self):
        serializer = extensions.ExtensionsXMLSerializer()
        data = {
            "extensions": [
                {
                    "name": "Public Image Extension",
                    "namespace": "http://foo.com/api/ext/pie/v1.0",
                    "alias": "RS-PIE",
                    "updated": "2011-01-22T13:25:27-06:00",
                    "description": "Adds the capability to share an image.",
                    "links": [
                        {
                            "rel": "describedby",
        data = {"extensions": [{
                "name": "Public Image Extension",
                "namespace": "http://foo.com/api/ext/pie/v1.0",
                "alias": "RS-PIE",
                "updated": "2011-01-22T13:25:27-06:00",
                "description": "Adds the capability to share an image.",
                "links": [{"rel": "describedby",
                           "type": "application/pdf",
                           "href": "http://foo.com/api/ext/cs-pie.pdf",
                        },
                        {
                            "rel": "describedby",
                            "type": "application/vnd.sun.wadl+xml",
                            "href": "http://foo.com/api/ext/cs-pie.wadl",
                        },
                    ],
                },
                {
                    "name": "Cloud Block Storage",
                    "namespace": "http://foo.com/api/ext/cbs/v1.0",
                    "alias": "RS-CBS",
                    "updated": "2011-01-12T11:22:33-06:00",
                    "description": "Allows mounting cloud block storage.",
                    "links": [
                        {
                            "rel": "describedby",
                            "type": "application/pdf",
                            "href": "http://foo.com/api/ext/cs-cbs.pdf",
                        },
                        {
                            "rel": "describedby",
                           "href": "http://foo.com/api/ext/cs-pie.pdf"},
                          {"rel": "describedby",
                            "type": "application/vnd.sun.wadl+xml",
                            "href": "http://foo.com/api/ext/cs-cbs.wadl",
                        },
                    ],
                },
            ],
        }
                           "href": "http://foo.com/api/ext/cs-pie.wadl"}]},
                {"name": "Cloud Block Storage",
                 "namespace": "http://foo.com/api/ext/cbs/v1.0",
                 "alias": "RS-CBS",
                 "updated": "2011-01-12T11:22:33-06:00",
                 "description": "Allows mounting cloud block storage.",
                 "links": [{"rel": "describedby",
                            "type": "application/pdf",
                            "href": "http://foo.com/api/ext/cs-cbs.pdf"},
                           {"rel": "describedby",
                            "type": "application/vnd.sun.wadl+xml",
                            "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]}

        xml = serializer.serialize(data, 'index')
        print xml

@@ -915,86 +915,56 @@ class LimitsViewBuilderV11Test(test.TestCase):

    def setUp(self):
        self.view_builder = views.limits.ViewBuilderV11()
        self.rate_limits = [
            {
                "URI": "*",
                "regex": ".*",
                "value": 10,
                "verb": "POST",
                "remaining": 2,
                "unit": "MINUTE",
                "resetTime": 1311272226,
            },
            {
                "URI": "*/servers",
                "regex": "^/servers",
                "value": 50,
                "verb": "POST",
                "remaining": 10,
                "unit": "DAY",
                "resetTime": 1311272226,
            },
        ]
        self.absolute_limits = {
            "metadata_items": 1,
            "injected_files": 5,
            "injected_file_content_bytes": 5,
        }
        self.rate_limits = [{"URI": "*",
                             "regex": ".*",
                             "value": 10,
                             "verb": "POST",
                             "remaining": 2,
                             "unit": "MINUTE",
                             "resetTime": 1311272226},
                            {"URI": "*/servers",
                             "regex": "^/servers",
                             "value": 50,
                             "verb": "POST",
                             "remaining": 10,
                             "unit": "DAY",
                             "resetTime": 1311272226}]
        self.absolute_limits = {"metadata_items": 1,
                                "injected_files": 5,
                                "injected_file_content_bytes": 5}

    def tearDown(self):
        pass

    def test_build_limits(self):
        expected_limits = {
            "limits": {
                "rate": [
                    {
                        "uri": "*",
                        "regex": ".*",
                        "limit": [
                            {
                                "value": 10,
                                "verb": "POST",
                                "remaining": 2,
                                "unit": "MINUTE",
                                "next-available": "2011-07-21T18:17:06Z",
                            },
                        ]
                    },
                    {
                        "uri": "*/servers",
                        "regex": "^/servers",
                        "limit": [
                            {
                                "value": 50,
                                "verb": "POST",
                                "remaining": 10,
                                "unit": "DAY",
                                "next-available": "2011-07-21T18:17:06Z",
                            },
                        ]
                    },
                ],
                "absolute": {
                    "maxServerMeta": 1,
                    "maxImageMeta": 1,
                    "maxPersonality": 5,
                    "maxPersonalitySize": 5
                }
            }
        }
        expected_limits = {"limits": {
                "rate": [{
                      "uri": "*",
                      "regex": ".*",
                      "limit": [{"value": 10,
                                 "verb": "POST",
                                 "remaining": 2,
                                 "unit": "MINUTE",
                                 "next-available": "2011-07-21T18:17:06Z"}]},
                    {"uri": "*/servers",
                     "regex": "^/servers",
                     "limit": [{"value": 50,
                                "verb": "POST",
                                "remaining": 10,
                                "unit": "DAY",
                                "next-available": "2011-07-21T18:17:06Z"}]}],
                "absolute": {"maxServerMeta": 1,
                             "maxImageMeta": 1,
                             "maxPersonality": 5,
                             "maxPersonalitySize": 5}}}

        output = self.view_builder.build(self.rate_limits,
                                         self.absolute_limits)
        self.assertDictMatch(output, expected_limits)

    def test_build_limits_empty_limits(self):
        expected_limits = {
            "limits": {
                "rate": [],
                "absolute": {},
            }
        }
        expected_limits = {"limits": {"rate": [],
                                      "absolute": {}}}

        abs_limits = {}
        rate_limits = []
@@ -1012,45 +982,28 @@ class LimitsXMLSerializationTest(test.TestCase):

    def test_index(self):
        serializer = limits.LimitsXMLSerializer()

        fixture = {
            "limits": {
                "rate": [
                    {
                        "uri": "*",
                        "regex": ".*",
                        "limit": [
                            {
                                "value": 10,
                                "verb": "POST",
                                "remaining": 2,
                                "unit": "MINUTE",
                                "next-available": "2011-12-15T22:42:45Z",
                            },
                        ]
                    },
                    {
                        "uri": "*/servers",
                        "regex": "^/servers",
                        "limit": [
                            {
                                "value": 50,
                                "verb": "POST",
                                "remaining": 10,
                                "unit": "DAY",
                                "next-available": "2011-12-15T22:42:45Z"
                            },
                        ]
                    },
                ],
                "absolute": {
                    "maxServerMeta": 1,
                    "maxImageMeta": 1,
                    "maxPersonality": 5,
                    "maxPersonalitySize": 10240
                }
            }
        }
        fixture = {"limits": {
                   "rate": [{
                         "uri": "*",
                         "regex": ".*",
                         "limit": [{
                               "value": 10,
                               "verb": "POST",
                               "remaining": 2,
                               "unit": "MINUTE",
                               "next-available": "2011-12-15T22:42:45Z"}]},
                        {"uri": "*/servers",
                         "regex": "^/servers",
                         "limit": [{
                               "value": 50,
                               "verb": "POST",
                               "remaining": 10,
                               "unit": "DAY",
                               "next-available": "2011-12-15T22:42:45Z"}]}],
                   "absolute": {"maxServerMeta": 1,
                                "maxImageMeta": 1,
                                "maxPersonality": 5,
                                "maxPersonalitySize": 10240}}}

        output = serializer.serialize(fixture, 'index')
        actual = minidom.parseString(output.replace(" ", ""))
@@ -1083,12 +1036,9 @@ class LimitsXMLSerializationTest(test.TestCase):
    def test_index_no_limits(self):
        serializer = limits.LimitsXMLSerializer()

        fixture = {
            "limits": {
                "rate": [],
                "absolute": {},
            }
        }
        fixture = {"limits": {
                   "rate": [],
                   "absolute": {}}}

        output = serializer.serialize(fixture, 'index')
        actual = minidom.parseString(output.replace(" ", ""))

@@ -3056,8 +3056,7 @@ class ServersViewBuilderV11Test(test.TestCase):
                                                   address_builder,
                                                   flavor_builder,
                                                   image_builder,
                                                   base_url,
                                                   )
                                                   base_url)
        return view_builder

    def test_build_server(self):

@@ -644,10 +644,13 @@ class SimpleDriverTestCase(test.TestCase):
        self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
        self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
        driver_i._live_migration_src_check(nocare, nocare)
        driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
        driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
        driver_i._live_migration_dest_check(nocare, nocare,
                                            i_ref['host'], False)
        driver_i._live_migration_common_check(nocare, nocare,
                                              i_ref['host'], False)
        self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
        kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
        kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
                  'block_migration': False}
        rpc.cast(self.context,
                 db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
                 {"method": 'live_migration', "args": kwargs})
@@ -655,7 +658,8 @@ class SimpleDriverTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.scheduler.live_migration(self.context, FLAGS.compute_topic,
                                      instance_id=instance_id,
                                      dest=i_ref['host'])
                                      dest=i_ref['host'],
                                      block_migration=False)

        i_ref = db.instance_get(self.context, instance_id)
        self.assertTrue(i_ref['state_description'] == 'migrating')
@@ -736,7 +740,7 @@ class SimpleDriverTestCase(test.TestCase):

        self.assertRaises(exception.ComputeServiceUnavailable,
                          self.scheduler.driver._live_migration_dest_check,
                          self.context, i_ref, i_ref['host'])
                          self.context, i_ref, i_ref['host'], False)

        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
@@ -749,7 +753,7 @@ class SimpleDriverTestCase(test.TestCase):

        self.assertRaises(exception.UnableToMigrateToSelf,
                          self.scheduler.driver._live_migration_dest_check,
                          self.context, i_ref, i_ref['host'])
                          self.context, i_ref, i_ref['host'], False)

        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
@@ -757,15 +761,33 @@ class SimpleDriverTestCase(test.TestCase):
    def test_live_migration_dest_check_service_lack_memory(self):
        """Confirms exception raises when dest doesn't have enough memory."""
        instance_id = self._create_instance()
        instance_id2 = self._create_instance(host='somewhere',
                                             memory_mb=12)
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host='somewhere',
                                             memory_mb_used=12)
        s_ref = self._create_compute_service(host='somewhere')

        self.assertRaises(exception.MigrationError,
                          self.scheduler.driver._live_migration_dest_check,
                          self.context, i_ref, 'somewhere')
                          self.context, i_ref, 'somewhere', False)

        db.instance_destroy(self.context, instance_id)
        db.instance_destroy(self.context, instance_id2)
        db.service_destroy(self.context, s_ref['id'])

    def test_block_migration_dest_check_service_lack_disk(self):
        """Confirms exception raises when dest doesn't have enough disk."""
        instance_id = self._create_instance()
        instance_id2 = self._create_instance(host='somewhere',
                                             local_gb=70)
        i_ref = db.instance_get(self.context, instance_id)
        s_ref = self._create_compute_service(host='somewhere')

        self.assertRaises(exception.MigrationError,
                          self.scheduler.driver._live_migration_dest_check,
                          self.context, i_ref, 'somewhere', True)

        db.instance_destroy(self.context, instance_id)
        db.instance_destroy(self.context, instance_id2)
        db.service_destroy(self.context, s_ref['id'])

    def test_live_migration_dest_check_service_works_correctly(self):
@@ -777,7 +799,8 @@ class SimpleDriverTestCase(test.TestCase):

        ret = self.scheduler.driver._live_migration_dest_check(self.context,
                                                               i_ref,
                                                               'somewhere')
                                                               'somewhere',
                                                               False)
        self.assertTrue(ret is None)
        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
@@ -810,9 +833,10 @@ class SimpleDriverTestCase(test.TestCase):
                 "args": {'filename': fpath}})

        self.mox.ReplayAll()
        self.assertRaises(exception.SourceHostUnavailable,
        #self.assertRaises(exception.SourceHostUnavailable,
        self.assertRaises(exception.FileNotFound,
                          self.scheduler.driver._live_migration_common_check,
                          self.context, i_ref, dest)
                          self.context, i_ref, dest, False)

        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
@@ -836,7 +860,7 @@ class SimpleDriverTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.InvalidHypervisorType,
                          self.scheduler.driver._live_migration_common_check,
                          self.context, i_ref, dest)
                          self.context, i_ref, dest, False)

        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
@@ -862,7 +886,7 @@ class SimpleDriverTestCase(test.TestCase):
        self.mox.ReplayAll()
        self.assertRaises(exception.DestinationHypervisorTooOld,
                          self.scheduler.driver._live_migration_common_check,
                          self.context, i_ref, dest)
                          self.context, i_ref, dest, False)

        db.instance_destroy(self.context, instance_id)
        db.service_destroy(self.context, s_ref['id'])
@@ -894,7 +918,8 @@ class SimpleDriverTestCase(test.TestCase):
        try:
            self.scheduler.driver._live_migration_common_check(self.context,
                                                               i_ref,
                                                               dest)
                                                               dest,
                                                               False)
        except rpc.RemoteError, e:
            c = (e.message.find(_("doesn't have compatibility to")) >= 0)


@@ -714,11 +714,15 @@ class ComputeTestCase(test.TestCase):
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}})
                            "args": {'instance_id': i_ref['id'],
                                     'block_migration': False,
                                     'disk': None}})

        self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
        self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                                           self.compute.post_live_migration,
                                           self.compute.recover_live_migration)
                                           self.compute.rollback_live_migration,
                                           False)

        self.compute.db = dbmock
        self.mox.ReplayAll()
@@ -739,13 +743,18 @@ class ComputeTestCase(test.TestCase):
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}}).\
                            "args": {'instance_id': i_ref['id'],
                                     'block_migration': False,
                                     'disk': None}}).\
                           AndRaise(rpc.RemoteError('', '', ''))
        dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
                                                'state': power_state.RUNNING,
                                                'host': i_ref['host']})
        for v in i_ref['volumes']:
            dbmock.volume_update(c, v['id'], {'status': 'in-use'})
            # mock for volume_api.remove_from_compute
            rpc.call(c, topic, {"method": "remove_volume",
                                "args": {'volume_id': v['id']}})

        self.compute.db = dbmock
        self.mox.ReplayAll()
@@ -766,7 +775,9 @@ class ComputeTestCase(test.TestCase):
                             AndReturn(topic)
        self.mox.StubOutWithMock(rpc, 'call')
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}}).\
                            "args": {'instance_id': i_ref['id'],
                                     'block_migration': False,
                                     'disk': None}}).\
                           AndRaise(rpc.RemoteError('', '', ''))
        dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
                                                'state': power_state.RUNNING,
@@ -791,11 +802,14 @@ class ComputeTestCase(test.TestCase):
        dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
                             AndReturn(topic)
        rpc.call(c, topic, {"method": "pre_live_migration",
                            "args": {'instance_id': i_ref['id']}})
                            "args": {'instance_id': i_ref['id'],
                                     'block_migration': False,
                                     'disk': None}})
        self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
        self.compute.driver.live_migration(c, i_ref, i_ref['host'],
                                           self.compute.post_live_migration,
                                           self.compute.recover_live_migration)
                                           self.compute.rollback_live_migration,
                                           False)

        self.compute.db = dbmock
        self.mox.ReplayAll()
@@ -829,6 +843,10 @@ class ComputeTestCase(test.TestCase):
            self.compute.volume_manager.remove_compute_volume(c, v['id'])
        self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
        self.compute.driver.unfilter_instance(i_ref, [])
        self.mox.StubOutWithMock(rpc, 'call')
        rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
                 {"method": "post_live_migration_at_destination",
                  "args": {'instance_id': i_ref['id'], 'block_migration': False}})

        # executing
        self.mox.ReplayAll()

@@ -21,6 +21,7 @@ import os
import re
import shutil
import sys
import tempfile

from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
@@ -696,17 +697,20 @@ class LibvirtConnTestCase(test.TestCase):
                return vdmock

        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
        self.mox.StubOutWithMock(self.compute, "recover_live_migration")
        self.compute.recover_live_migration(self.context, instance_ref,
                                            dest='dest')
        # self.mox.StubOutWithMock(self.compute, "recover_live_migration")
        self.mox.StubOutWithMock(self.compute, "rollback_live_migration")
        # self.compute.recover_live_migration(self.context, instance_ref,
        #                                     dest='dest')
        self.compute.rollback_live_migration(self.context, instance_ref,
                                             'dest', False)

        # Start test
        #start test
        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        self.assertRaises(libvirt.libvirtError,
                          conn._live_migration,
                          self.context, instance_ref, 'dest', '',
                          self.compute.recover_live_migration)
                          self.context, instance_ref, 'dest', False,
                          self.compute.rollback_live_migration)

        instance_ref = db.instance_get(self.context, instance_ref['id'])
        self.assertTrue(instance_ref['state_description'] == 'running')
@@ -717,6 +721,95 @@ class LibvirtConnTestCase(test.TestCase):
        db.volume_destroy(self.context, volume_ref['id'])
        db.instance_destroy(self.context, instance_ref['id'])

    def test_pre_block_migration_works_correctly(self):
        """Confirms pre_block_migration works correctly."""

        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Replace instances_path since this testcase creates tmpfile
        tmpdir = tempfile.mkdtemp()
        store = FLAGS.instances_path
        FLAGS.instances_path = tmpdir

        # Test data
        instance_ref = db.instance_create(self.context, self.test_instance)
        dummyjson = '[{"path": "%s/disk", "local_gb": "10G", "type": "raw"}]'

        # Preparing mocks
        # qemu-img should be mocked since the test environment might not have
        # large disk space.
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw',
                      '%s/%s/disk' % (tmpdir, instance_ref.name), '10G')

        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        conn.pre_block_migration(self.context, instance_ref,
                                 dummyjson % tmpdir)

        self.assertTrue(os.path.exists('%s/%s/' %
                                       (tmpdir, instance_ref.name)))

        shutil.rmtree(tmpdir)
        db.instance_destroy(self.context, instance_ref['id'])
        # Restore FLAGS.instances_path
        FLAGS.instances_path = store

    def test_get_instance_disk_info_works_correctly(self):
        """Confirms get_instance_disk_info works correctly."""
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():
            return

        # Test data
        instance_ref = db.instance_create(self.context, self.test_instance)
        dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
                    "<devices>"
                    "<disk type='file'><driver name='qemu' type='raw'/>"
                    "<source file='/test/disk'/>"
                    "<target dev='vda' bus='virtio'/></disk>"
                    "<disk type='file'><driver name='qemu' type='qcow2'/>"
                    "<source file='/test/disk.local'/>"
                    "<target dev='vdb' bus='virtio'/></disk>"
                    "</devices></domain>")

        ret = ("image: /test/disk\nfile format: raw\n"
               "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n")

        # Preparing mocks
        vdmock = self.mox.CreateMock(libvirt.virDomain)
        self.mox.StubOutWithMock(vdmock, "XMLDesc")
        vdmock.XMLDesc(0).AndReturn(dummyxml)

        def fake_lookup(instance_name):
            if instance_name == instance_ref.name:
                return vdmock
        self.create_fake_libvirt_mock(lookupByName=fake_lookup)

        self.mox.StubOutWithMock(os.path, "getsize")
        # based on the test data above, one is a raw image, so getsize is mocked.
        os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024)
        # the other is a qcow image, so qemu-img should be mocked.
        self.mox.StubOutWithMock(utils, "execute")
        utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\
            AndReturn((ret, ''))

        self.mox.ReplayAll()
        conn = connection.LibvirtConnection(False)
        info = conn.get_instance_disk_info(self.context, instance_ref)
        info = utils.loads(info)

        self.assertTrue(info[0]['type'] == 'raw' and
                        info[1]['type'] == 'qcow2' and
                        info[0]['path'] == '/test/disk' and
                        info[1]['path'] == '/test/disk.local' and
                        info[0]['local_gb'] == '10G' and
                        info[1]['local_gb'] == '20G')

        db.instance_destroy(self.context, instance_ref['id'])

    def test_spawn_with_network_info(self):
        # Skip if non-libvirt environment
        if not self.lazy_load_library_exists():

@@ -492,7 +492,7 @@ class FakeConnection(driver.ComputeDriver):
        raise NotImplementedError('This method is supported only by libvirt.')

    def live_migration(self, context, instance_ref, dest,
                       post_method, recover_method):
                       post_method, recover_method, block_migration=False):
        """This method is supported only by libvirt."""
        return


@@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri',
flags.DEFINE_string('live_migration_flag',
                    "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
                    'Define live migration behavior.')
flags.DEFINE_string('block_migration_flag',
                    "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
                    "VIR_MIGRATE_NON_SHARED_INC",
                    'Define block migration behavior.')
flags.DEFINE_integer('live_migration_bandwidth', 0,
                     'Define live migration behavior')
flags.DEFINE_string('qemu_img', 'qemu-img',
@@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver):

    If cow is True, it will make a CoW image instead of a copy.
    """

    if not os.path.exists(target):
        base_dir = os.path.join(FLAGS.instances_path, '_base')
        if not os.path.exists(base_dir):
@@ -1549,7 +1554,7 @@ class LibvirtConnection(driver.ComputeDriver):
            time.sleep(1)

    def live_migration(self, ctxt, instance_ref, dest,
                       post_method, recover_method):
                       post_method, recover_method, block_migration=False):
        """Spawning live_migration operation for distributing high-load.

        :params ctxt: security context
@@ -1557,20 +1562,22 @@ class LibvirtConnection(driver.ComputeDriver):
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params dest: destination host
        :params block_migration: destination host
        :params post_method:
            post operation method.
            expected nova.compute.manager.post_live_migration.
        :params recover_method:
            recovery method when any exception occurs.
            expected nova.compute.manager.recover_live_migration.
        :params block_migration: if true, do block migration.

        """

        greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
                          post_method, recover_method)
                          post_method, recover_method, block_migration)

def _live_migration(self, ctxt, instance_ref, dest,
|
||||
post_method, recover_method):
|
||||
def _live_migration(self, ctxt, instance_ref, dest, post_method,
|
||||
recover_method, block_migration=False):
|
||||
"""Do live migration.
|
||||
|
||||
:params ctxt: security context
|
||||
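The docstring spells out the calling contract: live_migration returns immediately, the copy proceeds in a greenthread, and exactly one of the two callbacks fires at the end. A hedged sketch of the expected call shape (the callback names come from the docstring above; the real call site lives in nova.compute.manager):

    # Illustration only -- from the compute manager's point of view:
    self.driver.live_migration(ctxt, instance_ref, dest,
                               self.post_live_migration,
                               self.recover_live_migration,
                               block_migration=True)
    # Returns at once; on success post_method(ctxt, instance_ref, dest,
    # block_migration) runs, on failure recover_method does, with the
    # same arguments.
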
@@ -1589,27 +1596,21 @@ class LibvirtConnection(driver.ComputeDriver):

        # Do live migration.
        try:
            flaglist = FLAGS.live_migration_flag.split(',')
            if block_migration:
                flaglist = FLAGS.block_migration_flag.split(',')
            else:
                flaglist = FLAGS.live_migration_flag.split(',')
            flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
            logical_sum = reduce(lambda x, y: x | y, flagvals)

            if self.read_only:
                tmpconn = self._connect(self.libvirt_uri, False)
                dom = tmpconn.lookupByName(instance_ref.name)
                dom.migrateToURI(FLAGS.live_migration_uri % dest,
                                 logical_sum,
                                 None,
                                 FLAGS.live_migration_bandwidth)
                tmpconn.close()
            else:
                dom = self._conn.lookupByName(instance_ref.name)
                dom.migrateToURI(FLAGS.live_migration_uri % dest,
                                 logical_sum,
                                 None,
                                 FLAGS.live_migration_bandwidth)
            dom = self._conn.lookupByName(instance_ref.name)
            dom.migrateToURI(FLAGS.live_migration_uri % dest,
                             logical_sum,
                             None,
                             FLAGS.live_migration_bandwidth)

        except Exception:
            recover_method(ctxt, instance_ref, dest=dest)
            recover_method(ctxt, instance_ref, dest, block_migration)
            raise

        # Waiting for completion of live_migration.
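For reference, the positional arguments to migrateToURI are (duri, flags, dname, bandwidth): passing None for dname keeps the domain name on the destination, and libvirt treats a bandwidth of 0 as "no limit". Assuming live_migration_uri keeps its usual qemu+tcp default (an assumption, not shown in this hunk), the call above boils down to:

    # Illustration only -- equivalent standalone libvirt call:
    dom.migrateToURI('qemu+tcp://%s/system' % dest,  # peer-to-peer target URI
                     logical_sum,                    # OR-ed VIR_MIGRATE_* flags
                     None,                           # keep the domain name
                     0)                              # 0 = unthrottled
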
@@ -1621,11 +1622,150 @@ class LibvirtConnection(driver.ComputeDriver):
                self.get_info(instance_ref.name)['state']
            except exception.NotFound:
                timer.stop()
                post_method(ctxt, instance_ref, dest)
                post_method(ctxt, instance_ref, dest, block_migration)

        timer.f = wait_for_live_migration
        timer.start(interval=0.5, now=True)

    def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
        """Preparation for block migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params disk_info_json:
            json string returned by get_instance_disk_info

        """
        disk_info = utils.loads(disk_info_json)

        # make instance directory
        instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
        if os.path.exists(instance_dir):
            raise exception.DestinationDiskExists(path=instance_dir)
        os.mkdir(instance_dir)

        for info in disk_info:
            base = os.path.basename(info['path'])
            # Get image type and create empty disk image.
            instance_disk = os.path.join(instance_dir, base)
            utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'],
                          instance_disk, info['local_gb'])

        # If the image has a kernel and ramdisk, just download them
        # in the normal way.
        if instance_ref['kernel_id']:
            user = manager.AuthManager().get_user(instance_ref['user_id'])
            project = manager.AuthManager().get_project(
                instance_ref['project_id'])
            self._fetch_image(nova_context.get_admin_context(),
                              os.path.join(instance_dir, 'kernel'),
                              instance_ref['kernel_id'],
                              user,
                              project)
            if instance_ref['ramdisk_id']:
                self._fetch_image(nova_context.get_admin_context(),
                                  os.path.join(instance_dir, 'ramdisk'),
                                  instance_ref['ramdisk_id'],
                                  user,
                                  project)

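To make the qemu-img loop above concrete: fed the two-disk disk_info used in the tests, it pre-creates empty destination images with commands equivalent to the output of this sketch (instance_dir is an illustrative value, not a fixed path):

    import os

    instance_dir = '/var/lib/nova/instances/instance-00000001'  # illustrative
    disk_info = [{'path': '/test/disk', 'type': 'raw', 'local_gb': '10G'},
                 {'path': '/test/disk.local', 'type': 'qcow2',
                  'local_gb': '20G'}]
    for info in disk_info:
        print 'sudo qemu-img create -f %s %s %s' % (
            info['type'],
            os.path.join(instance_dir, os.path.basename(info['path'])),
            info['local_gb'])
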
    def post_live_migration_at_destination(self, ctxt,
                                           instance_ref,
                                           network_info,
                                           block_migration):
        """Post operation of live migration at destination host.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :params network_info: instance network information
        :params block_migration: if true, post operation of block_migration.
        """
        # Define migrated instance, otherwise, suspend/destroy does not work.
        dom_list = self._conn.listDefinedDomains()
        if instance_ref.name not in dom_list:
            instance_dir = os.path.join(FLAGS.instances_path,
                                        instance_ref.name)
            xml_path = os.path.join(instance_dir, 'libvirt.xml')
            # In case of block migration, the destination does not have
            # a libvirt.xml.
            if not os.path.isfile(xml_path):
                xml = self.to_xml(instance_ref, network_info=network_info)
                f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
                f.write(xml)
                f.close()
            # libvirt.xml should be made by to_xml(), but libvirt
            # does not accept the to_xml() result here, since the uuid
            # is not included in it.
            dom = self._lookup_by_name(instance_ref.name)
            self._conn.defineXML(dom.XMLDesc(0))

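The defineXML(dom.XMLDesc(0)) pair above is the standard libvirt idiom for persisting a transient domain: after a peer-to-peer migration the guest is running on the destination but has no stored definition, so it is re-defined from its own live XML. A minimal standalone sketch (conn is assumed to be an open libvirt connection; the domain name is illustrative):

    import libvirt

    conn = libvirt.open('qemu:///system')
    dom = conn.lookupByName('instance-00000001')  # running but transient
    conn.defineXML(dom.XMLDesc(0))                # persist its live config
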
    def get_instance_disk_info(self, ctxt, instance_ref):
        """Retrieve disk information of an instance for block migration.

        :params ctxt: security context
        :params instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :return:
            json strings with below format.
            "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]"

        """
        disk_info = []

        virt_dom = self._lookup_by_name(instance_ref.name)
        xml = virt_dom.XMLDesc(0)
        doc = libxml2.parseDoc(xml)
        disk_nodes = doc.xpathEval('//devices/disk')
        path_nodes = doc.xpathEval('//devices/disk/source')
        driver_nodes = doc.xpathEval('//devices/disk/driver')

        for cnt, path_node in enumerate(path_nodes):
            disk_type = disk_nodes[cnt].get_properties().getContent()
            path = path_node.get_properties().getContent()

            if disk_type != 'file':
                LOG.debug(_('skipping %(path)s since it looks like a volume')
                          % locals())
                continue

            # In the case of libvirt.xml, the disk type could be obtained by
            # -> disk_type = driver_nodes[cnt].get_properties().getContent()
            # but this xml is generated by kvm, so the format is slightly
            # different.
            disk_type = \
                driver_nodes[cnt].get_properties().get_next().getContent()
            if disk_type == 'raw':
                size = int(os.path.getsize(path))
            else:
                out, err = utils.execute('sudo', 'qemu-img', 'info', path)
                size = [i.split('(')[1].split()[0] for i in out.split('\n')
                        if i.strip().find('virtual size') >= 0]
                size = int(size[0])

            # Block migration needs an empty image of the same or larger
            # size on the destination host. Since qemu-img may create a
            # slightly smaller image depending on the original image size,
            # round the size up to a fixed unit.
            for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
                                  ('K', 1024), ('', 1)]:
                if size / divisor == 0:
                    continue
                if size % divisor != 0:
                    size = size / divisor + 1
                else:
                    size = size / divisor
                size = str(size) + unit
                break

            disk_info.append({'type': disk_type, 'path': path,
                              'local_gb': size})

        return utils.dumps(disk_info)

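The rounding rule above is easiest to see with numbers. Using Python 2 integer division and the 20G qcow2 disk from the tests (the one-byte-larger value is a hypothetical to show the round-up):

    >>> 21474836480 / (1024 ** 3)         # exact multiple of 1G
    20
    >>> 21474836480 % (1024 ** 3)
    0                                     # -> '20G'
    >>> 21474836481 / (1024 ** 3) + 1     # remainder of 1 byte: round up
    21                                    # -> '21G', never smaller than source
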
    def unfilter_instance(self, instance_ref, network_info):
        """See comments of same method in firewall_driver."""
        self.firewall_driver.unfilter_instance(instance_ref,

@@ -314,7 +314,7 @@ class XenAPIConnection(driver.ComputeDriver):
        return

    def live_migration(self, context, instance_ref, dest,
                       post_method, recover_method):
                       post_method, recover_method, block_migration=False):
        """This method is supported only by libvirt."""
        return