Fixed based on reviewer's comments.
@@ -637,8 +637,8 @@ class ServiceCommands(object):
                                      "args": {"host": host}})
 
         if type(result) != dict:
-            print 'Unexpected error occurs'
-            print '[Result]', result
+            print _('An unexpected error has occurred.')
+            print _('[Result]'), result
         else:
             cpu = result['resource']['vcpus']
             mem = result['resource']['memory_mb']
@@ -667,7 +667,7 @@ class ServiceCommands(object):
         ctxt = context.get_admin_context()
         service_refs = db.service_get_all_by_host(ctxt, host)
         if len(service_refs) <= 0:
-            raise exception.Invalid(_('%s does not exists.') % host)
+            raise exception.Invalid(_('%s does not exist.') % host)
 
         service_refs = [s for s in service_refs if s['topic'] == 'compute']
         if len(service_refs) <= 0:
@@ -64,7 +64,7 @@ flags.DEFINE_integer('password_length', 12,
 flags.DEFINE_string('console_host', socket.gethostname(),
                     'Console proxy host to use to connect to instances on'
                     'this host.')
-flags.DEFINE_string('live_migration_retry_count', 30,
+flags.DEFINE_integer('live_migration_retry_count', 30,
                      ("Retry count needed in live_migration."
                       " sleep 1 sec for each count"))
 
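
The switch to DEFINE_integer matters because the retry count is consumed as a number. A minimal standalone sketch of how such a flag is typically used (the loop below is illustrative, not the actual Nova call site):

import time

# Stand-in for FLAGS.live_migration_retry_count; a DEFINE_string flag
# would yield the string '30' here and break range() below.
live_migration_retry_count = 30

def wait_for_migration(check_fn):
    # Retry up to the configured count, sleeping 1 sec per attempt,
    # as the flag's help text describes.
    for _ in range(live_migration_retry_count):
        if check_fn():
            return True
        time.sleep(1)
    return False
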
@@ -757,8 +757,9 @@ class ComputeManager(manager.Manager):
         dirpath = FLAGS.instances_path
         fd, tmp_file = tempfile.mkstemp(dir=dirpath)
         LOG.debug(_("Creating tmpfile %s to notify to other "
-                    "compute node that they mounts same storage.") % tmp_file)
-        os.fdopen(fd, 'w+').close()
+                    "compute nodes that they should mount "
+                    "the same storage.") % tmp_file)
+        os.close(fd)
         return os.path.basename(tmp_file)
 
     @exception.wrap_exception
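
This hunk is half of a shared-storage handshake: the source node drops a marker file under FLAGS.instances_path, and the destination node checks whether it can see it. A self-contained sketch of both halves (the function names are assumptions for illustration):

import os
import tempfile

def create_shared_storage_test_file(dirpath):
    # Create an empty marker file on what should be shared storage.
    # os.close(fd) releases the mkstemp descriptor directly, without
    # the old os.fdopen(fd, 'w+').close() detour.
    fd, tmp_file = tempfile.mkstemp(dir=dirpath)
    os.close(fd)
    return os.path.basename(tmp_file)

def check_shared_storage_test_file(dirpath, filename):
    # Run on the other compute node: the marker is visible there only
    # if both nodes mount the same storage.
    return os.path.exists(os.path.join(dirpath, filename))
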
@@ -812,7 +813,7 @@ class ComputeManager(manager.Manager):
         # Getting fixed ips
         fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
         if not fixed_ip:
-            msg = _("%(instance_id)s(%(ec2_id)s) does'nt have fixed_ip")
+            msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.")
             raise exception.NotFound(msg % locals())
 
         # If any volume is mounted, prepare here.
@@ -929,7 +930,7 @@ class ComputeManager(manager.Manager):
             floating_ip = self.db.instance_get_floating_address(ctxt,
                                                          instance_id)
             if not floating_ip:
-                LOG.info(_('floating_ip is not found for %s'), i_name)
+                LOG.info(_('No floating_ip is found for %s.'), i_name)
             else:
                 floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
                                                                      floating_ip)
@@ -937,7 +938,7 @@ class ComputeManager(manager.Manager):
                                floating_ip_ref['address'],
                                {'host': dest})
             except exception.NotFound:
-                LOG.info(_('Floating_ip is not found for %s'), i_name)
+                LOG.info(_('No floating_ip is found for %s.'), i_name)
             except:
                 LOG.error(_("Live migration: Unexpected error:"
                             "%s cannot inherit floating ip..") % i_name)
@@ -945,12 +946,11 @@ class ComputeManager(manager.Manager):
             # Restore instance/volume state
             self.recover_live_migration(ctxt, instance_ref, dest)
 
-        LOG.info(_('Migrating %(i_name)s to %(dest)s finishes successfully.')
+        LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
                  % locals())
-        LOG.info(_("The below error is normally occurs. "
-                   "Just check if instance is successfully migrated.\n"
-                   "libvir: QEMU error : Domain not found: no domain "
-                   "with matching name.."))
+        LOG.info(_("You may see the error \"libvirt: QEMU error: "
+                   "Domain not found: no domain with matching name.\" "
+                   "This error can be safely ignored."))
 
     def recover_live_migration(self, ctxt, instance_ref, host=None):
         """Recovers Instance/volume state from migrating -> running.
@@ -192,8 +192,8 @@ def service_get_all_compute_by_host(context, host):
                    all()
 
     if not result:
-        raise exception.NotFound(_("%s does not exist or not "
-                                   "compute node.") % host)
+        raise exception.NotFound(_("%s does not exist or is not "
+                                   "a compute node.") % host)
 
     return result
 
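
For context, service_get_all_compute_by_host is a filter-then-raise query. A rough standalone equivalent (the model and exception below are simplified stand-ins, not Nova's actual schema):

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Service(Base):
    # Minimal stand-in for Nova's services table.
    __tablename__ = 'services'
    id = Column(Integer, primary_key=True)
    host = Column(String)
    topic = Column(String)

class NotFound(Exception):
    pass

def service_get_all_compute_by_host(session, host):
    # Keep only compute services on the given host; raise with the
    # corrected message when nothing matches.
    result = session.query(Service).\
                     filter_by(host=host).\
                     filter_by(topic='compute').\
                     all()
    if not result:
        raise NotFound("%s does not exist or is not a compute node." % host)
    return result
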
@@ -226,7 +226,6 @@ class Scheduler(object):
                               "args": {'cpu_info': oservice_ref['cpu_info']}})
 
         except rpc.RemoteError:
-            ec2_id = instance_ref['hostname']
             src = instance_ref['host']
             logging.exception(_("host %(dest)s is not compatible with "
                                 "original host %(src)s.") % locals())
@@ -259,8 +258,9 @@ class Scheduler(object):
         mem_avail = mem_total - mem_used
         mem_inst = instance_ref['memory_mb']
         if mem_avail <= mem_inst:
-            raise exception.NotEmpty(_("%(ec2_id)s is not capable to "
-                                       "migrate %(dest)s (host:%(mem_avail)s "
+            raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s "
+                                       "to destination: %(dest)s "
+                                       "(host:%(mem_avail)s "
                                        "<= instance:%(mem_inst)s)")
                                      % locals())
 
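
The reworded NotEmpty error wraps a plain capacity test. Reduced to its arithmetic (the helper name is invented for illustration):

def has_enough_memory(mem_total, mem_used, mem_inst):
    # Free memory on the destination must strictly exceed the
    # instance's footprint; mem_avail <= mem_inst is the failure case.
    mem_avail = mem_total - mem_used
    return mem_avail > mem_inst

# A 16 GB host with 12 GB used cannot accept an 8 GB instance.
assert not has_enough_memory(16384, 12288, 8192)
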
@@ -292,7 +292,7 @@ class Scheduler(object):
 
         except rpc.RemoteError:
             ipath = FLAGS.instances_path
-            logging.error(_("Cannot comfirm tmpfile at %(ipath)s is on "
+            logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
                             "same shared storage between %(src)s "
                             "and %(dest)s.") % locals())
             raise
@@ -89,14 +89,14 @@ class ComputeTestCase(test.TestCase):
         Use this when any testcase executed later than test_run_terminate
         """
         vol1 = models.Volume()
-        vol1.__setitem__('id', 1)
+        vol1['id'] = 1
         vol2 = models.Volume()
-        vol2.__setitem__('id', 2)
+        vol2['id'] = 2
         instance_ref = models.Instance()
-        instance_ref.__setitem__('id', 1)
-        instance_ref.__setitem__('volumes', [vol1, vol2])
-        instance_ref.__setitem__('hostname', 'i-00000001')
-        instance_ref.__setitem__('host', 'dummy')
+        instance_ref['id'] = 1
+        instance_ref['volumes'] = [vol1, vol2]
+        instance_ref['hostname'] = 'i-00000001'
+        instance_ref['host'] = 'dummy'
         return instance_ref
 
     def test_create_instance_defaults_display_name(self):
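
The cleanup works because obj[key] = value is exactly obj.__setitem__(key, value); the bracket form is simply the idiomatic spelling. A toy class makes the equivalence visible (a stand-in, not Nova's models):

class FakeModel(object):
    # Tiny stand-in for a model object supporting item assignment.
    def __init__(self):
        self._values = {}

    def __setitem__(self, key, value):
        self._values[key] = value

    def __getitem__(self, key):
        return self._values[key]

vol = FakeModel()
vol['id'] = 1               # what the updated tests do
assert vol['id'] == 1
vol.__setitem__('id', 2)    # what the old tests did; same method
assert vol['id'] == 2
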
@@ -114,9 +114,9 @@ class ComputeTestCase(test.TestCase):
         """Make sure create associates security groups"""
         group = self._create_group()
         instance_ref = models.Instance()
-        instance_ref.__setitem__('id', 1)
-        instance_ref.__setitem__('volumes', [{'id': 1}, {'id': 2}])
-        instance_ref.__setitem__('hostname', 'i-00000001')
+        instance_ref['id'] = 1
+        instance_ref['volumes'] = [{'id': 1}, {'id': 2}]
+        instance_ref['hostname'] = 'i-00000001'
         return instance_ref
 
     def test_create_instance_defaults_display_name(self):
@@ -390,7 +390,7 @@ class ComputeTestCase(test.TestCase):
     def test_pre_live_migration_instance_has_no_volume(self):
         """Confirm log meg when instance doesn't mount any volumes."""
         i_ref = self._get_dummy_instance()
-        i_ref.__setitem__('volumes', [])
+        i_ref['volumes'] = []
         c = context.get_admin_context()
 
         self._setup_other_managers()
@@ -501,7 +501,7 @@ class ComputeTestCase(test.TestCase):
     def test_live_migration_dest_raises_exception_no_volume(self):
         """Same as above test(input pattern is different) """
         i_ref = self._get_dummy_instance()
-        i_ref.__setitem__('volumes', [])
+        i_ref['volumes'] = []
         c = context.get_admin_context()
         topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
 
@@ -526,7 +526,7 @@ class ComputeTestCase(test.TestCase):
     def test_live_migration_works_correctly_no_volume(self):
         """Confirm live_migration() works as expected correctly."""
         i_ref = self._get_dummy_instance()
-        i_ref.__setitem__('volumes', [])
+        i_ref['volumes'] = []
         c = context.get_admin_context()
         topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
 
@@ -119,7 +119,8 @@ class SchedulerTestCase(test.TestCase):
         try:
             scheduler.show_host_resources(ctxt, dest)
         except exception.NotFound, e:
-            c1 = (0 <= e.message.find('does not exist or not compute node'))
+            c1 = (e.message.find(_("does not exist or is not a "
+                                   "compute node.")) >= 0)
         self.assertTrue(c1)
 
     def _dic_is_equal(self, dic1, dic2, keys=None):
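
Both assertion spellings hinge on str.find() returning -1 when the substring is absent, so `found >= 0` (or the old `0 <= found`) means "contains". A standalone illustration, including the even plainer membership test:

message = "foo does not exist or is not a compute node."

# find() returns the match offset, or -1 if the substring is missing.
assert message.find("is not a compute node") >= 0

# Equivalent and more direct in Python:
assert "is not a compute node" in message
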
@@ -786,7 +787,7 @@ class SimpleDriverTestCase(test.TestCase):
                                                i_ref,
                                                'somewhere')
         except exception.NotEmpty, e:
-            c = (e.message.find('is not capable to migrate') >= 0)
+            c = (e.message.find('Unable to migrate') >= 0)
 
         self.assertTrue(c)
         db.instance_destroy(self.context, instance_id)
@@ -42,24 +42,6 @@ class FakeManager(manager.Manager):
     def test_method(self):
         return 'manager'
 
-# temporary variable to store host/binary/self.mox
-# from each method to fake class.
-global_host = None
-global_binary = None
-global_mox = None
-
-
-class FakeComputeManager(compute_manager.ComputeManager):
-    """Fake computemanager manager for tests"""
-
-    def __init__(self, compute_driver=None, *args, **kwargs):
-        global ghost, gbinary, gmox
-        self.update_available_resource(mox.IgnoreArg())
-        gmox.ReplayAll()
-        super(FakeComputeManager, self).__init__(compute_driver,
-                                                 *args,
-                                                 **kwargs)
-
 
 class ExtendedService(service.Service):
     def test_method(self):
@@ -275,37 +257,38 @@ class ServiceTestCase(test.TestCase):
         """Confirm compute updates their record of compute-service table."""
         host = 'foo'
         binary = 'nova-compute'
-        topic = 'compute1'
-        service_create = {'host': host,
-                          'binary': binary,
-                          'topic': topic,
-                          'report_count': 0,
-                          'availability_zone': 'nova'}
-        service_ref = {'host': host,
-                       'binary': binary,
-                       'topic': topic,
-                       'report_count': 0,
-                       'availability_zone': 'nova',
-                       'id': 1}
-
-        service.db.service_get_by_args(mox.IgnoreArg(),
-                                       host,
-                                       binary).AndRaise(exception.NotFound())
-        service.db.service_create(mox.IgnoreArg(),
-                                  service_create).AndReturn(service_ref)
-        self.mox.StubOutWithMock(compute_manager.ComputeManager,
-                                 'update_available_resource')
-
-        global ghost, gbinary, gmox
-        ghost = host
-        gbinary = binary
-        gmox = self.mox
-
+        topic = 'compute'
+
+        # Any mocks are not working without UnsetStubs() here.
+        self.mox.UnsetStubs()
+        ctxt = context.get_admin_context()
+        service_ref = db.service_create(ctxt, {'host': host,
+                                               'binary': binary,
+                                               'topic': topic})
         serv = service.Service(host,
                                binary,
                                topic,
-                               'nova.tests.test_service.FakeComputeManager')
-        # ReplayAll has been executed FakeComputeManager.__init__()
-        #self.mox.ReplayAll()
+                               'nova.compute.manager.ComputeManager')
 
         # This testcase want to test calling update_available_resource.
         # No need to call periodic call, then below variable must be set 0.
         serv.report_interval = 0
         serv.periodic_interval = 0
 
         # Creating mocks
         self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
         service.rpc.Connection.instance(new=mox.IgnoreArg())
         service.rpc.Connection.instance(new=mox.IgnoreArg())
+        self.mox.StubOutWithMock(serv.manager.driver,
+                                 'update_available_resource')
+        serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
 
         # Just doing start()-stop(), not confirm new db record is created,
         # because update_available_resource() works only in libvirt environment.
         # This testcase confirms update_available_resource() is called.
         # Otherwise, mox complains.
         self.mox.ReplayAll()
         serv.start()
         serv.stop()
+
+        db.service_destroy(ctxt, service_ref['id'])
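
The rewritten test drops the FakeComputeManager/global-variable plumbing and stubs the real manager's driver instead, leaning on mox's record-replay-verify cycle. A compact sketch of that cycle (the Driver class is invented for illustration):

import mox

class Driver(object):
    def update_available_resource(self, ctxt, host):
        raise RuntimeError('should have been stubbed out')

def test_update_is_called():
    m = mox.Mox()
    driver = Driver()

    # Record: declare the single call we expect, with any context arg.
    m.StubOutWithMock(driver, 'update_available_resource')
    driver.update_available_resource(mox.IgnoreArg(), 'foo')
    m.ReplayAll()

    # Exercise: the code under test makes the recorded call.
    driver.update_available_resource(None, 'foo')

    # Verify: mox raises if the expectation was not met.
    m.VerifyAll()
    m.UnsetStubs()
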
@@ -283,7 +283,7 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertEquals(uri, testuri)
         db.instance_destroy(user_context, instance_ref['id'])
 
-    def tes1t_update_available_resource_works_correctly(self):
+    def test_update_available_resource_works_correctly(self):
         """Confirm compute_node table is updated successfully."""
         org_path = FLAGS.instances_path = ''
         FLAGS.instances_path = '.'
@@ -314,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase):
         compute_node = service_ref['compute_node'][0]
 
         if sys.platform.upper() == 'LINUX2':
-            self.assertTrue(compute_node['vcpus'] > 0)
+            self.assertTrue(compute_node['vcpus'] >= 0)
             self.assertTrue(compute_node['memory_mb'] > 0)
             self.assertTrue(compute_node['local_gb'] > 0)
             self.assertTrue(compute_node['vcpus_used'] == 0)
@@ -323,7 +323,7 @@ class LibvirtConnTestCase(test.TestCase):
             self.assertTrue(len(compute_node['hypervisor_type']) > 0)
             self.assertTrue(compute_node['hypervisor_version'] > 0)
         else:
-            self.assertTrue(compute_node['vcpus'] > 0)
+            self.assertTrue(compute_node['vcpus'] >= 0)
             self.assertTrue(compute_node['memory_mb'] == 0)
             self.assertTrue(compute_node['local_gb'] > 0)
             self.assertTrue(compute_node['vcpus_used'] == 0)
@@ -284,9 +284,10 @@ class AOETestCase(DriverTestCase):
             self.volume.check_for_export(self.context, self.instance_id)
         except exception.ProcessExecutionError, e:
             volume_id = volume_id_list[0]
-            msg = _("""Cannot confirm exported volume id:%(volume_id)s."""
-                    """vblade process for e%(shelf_id)s.%(blade_id)s """
-                    """isn't running.""") % locals()
+            msg = _("Cannot confirm exported volume id:%(volume_id)s. "
+                    "vblade process for e%(shelf_id)s.%(blade_id)s "
+                    "isn't running.") % locals()
+
             msg_is_match = (0 <= e.message.find(msg))
 
         self.assertTrue(msg_is_match)
@@ -860,7 +860,14 @@ class LibvirtConnection(object):
 
         """
 
-        return multiprocessing.cpu_count()
+        # On certain platforms, this will raise a NotImplementedError.
+        try:
+            return multiprocessing.cpu_count()
+        except NotImplementedError:
+            LOG.warn(_("Cannot get the number of cpu, because this "
+                       "function is not implemented for this platform. "
+                       "This error can be safely ignored for now."))
+            return 0
 
     def get_memory_mb_total(self):
         """Get the total memory size(MB) of physical computer.
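
multiprocessing.cpu_count() is documented to raise NotImplementedError on platforms where the count cannot be determined, which is the case the new try/except covers. The guard in isolation, stripped of Nova's logging:

import multiprocessing

def get_vcpu_total():
    # Fall back to 0 so callers can treat the vcpu count as unknown
    # instead of crashing on exotic platforms.
    try:
        return multiprocessing.cpu_count()
    except NotImplementedError:
        return 0
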
@@ -1042,9 +1049,9 @@ class LibvirtConnection(object):
         try:
             service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
         except exception.NotFound:
-            msg = _(("""Cannot update compute manager specific info,"""
-                     """ Because no service record found."""))
-            raise exception.Invalid(msg)
+            raise exception.Invalid(_("Cannot update compute manager "
+                                      "specific info, because no service "
+                                      "record was found."))
 
         # Updating host information
         dic = {'vcpus': self.get_vcpu_total(),
@@ -1059,11 +1066,11 @@ class LibvirtConnection(object):
 
         compute_node_ref = service_ref['compute_node']
         if not compute_node_ref:
-            LOG.info(_('Compute_service record is created for %s ') % host)
+            LOG.info(_('Compute_service record created for %s ') % host)
             dic['service_id'] = service_ref['id']
             db.compute_node_create(ctxt, dic)
         else:
-            LOG.info(_('Compute_service record is updated for %s ') % host)
+            LOG.info(_('Compute_service record updated for %s ') % host)
             db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
 
     def compare_cpu(self, cpu_info):
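
The branch above is a create-or-update (upsert) in application code: the first report from a host inserts a compute_node row, later reports update it in place. Schematically (the db helper names follow the hunk; the surrounding plumbing is assumed):

def sync_compute_record(db, ctxt, service_ref, dic):
    # No compute_node row yet: create one tied to this service.
    compute_node_ref = service_ref['compute_node']
    if not compute_node_ref:
        dic['service_id'] = service_ref['id']
        return db.compute_node_create(ctxt, dic)
    # Otherwise refresh the existing row.
    return db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
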
@@ -1081,8 +1088,7 @@ class LibvirtConnection(object):
 
         """
 
-        LOG.info(_('Checking cpu_info: instance was launched this cpu.\n%s')
-                 % cpu_info)
+        LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
         dic = utils.loads(cpu_info)
         xml = str(Template(self.cpuinfo_xml, searchList=dic))
         LOG.info(_('to xml...\n:%s ' % xml))
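
compare_cpu receives cpu_info as a JSON string, and the Template call renders it into the libvirt XML used for the CPU compatibility check. A rough sketch of that step with Cheetah (the template string is a toy, not Nova's actual cpuinfo_xml; utils.loads() here behaves like json.loads()):

import json

from Cheetah.Template import Template

cpu_info = json.dumps({'arch': 'x86_64', 'model': 'Nehalem',
                       'vendor': 'Intel'})

# Deserialize, then let the dict feed the template's $placeholders
# through searchList.
dic = json.loads(cpu_info)
cpuinfo_xml = '<cpu><arch>$arch</arch><model>$model</model></cpu>'
xml = str(Template(cpuinfo_xml, searchList=[dic]))
print(xml)  # <cpu><arch>x86_64</arch><model>Nehalem</model></cpu>
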