Merge the live migration feature on top of rev439

This version has no EBS support and no CPU flag checks
masumotok
2010-12-07 19:25:43 +09:00
parent c04426af91
commit d899ac1cf5
5 changed files with 262 additions and 7 deletions

View File

@@ -76,6 +76,13 @@ from nova import quota
from nova import utils
from nova.auth import manager
from nova.cloudpipe import pipelib
# added by masumotok
from nova import rpc
# added by masumotok
from nova.api.ec2 import cloud
# added by masumotok
from nova.compute import power_state
FLAGS = flags.FLAGS
@@ -424,6 +431,116 @@ class NetworkCommands(object):
int(network_size), int(vlan_start),
int(vpn_start))
# this class is added by masumotok
class InstanceCommands(object):
"""Class for mangaging VM instances."""
def live_migration(self, ec2_id, dest):
"""live_migration"""
logging.basicConfig()
ctxt = context.get_admin_context()
if FLAGS.network_manager != 'nova.network.manager.VlanManager':
msg = 'Only nova.network.manager.VlanManager is supported for now. Sorry!'
raise Exception(msg)
# 1. whether destination host exists
host_ref = db.host_get_by_name(ctxt, dest)
# 2. whether instance exists and running
# a try/except clause is necessary because only internal_id is shown
# when a NotFound exception occurs, which is not understandable to admins.
try:
internal_id = cloud.ec2_id_to_internal_id(ec2_id)
instance_ref = db.instance_get_by_internal_id(ctxt, internal_id)
except exception.NotFound:
print 'Instance %s (internal_id: %s) was not found' % (ec2_id, internal_id)
raise
if power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']:
print 'Instance(%s) is not running' % ec2_id
sys.exit(1)
# 3. the host where instance is running and dst host is not same
if dest == instance_ref['host']:
print '%s is where %s is currently running. Choose a different host.' \
% (dest, ec2_id)
sys.exit(2)
# 4. live migration
rpc.cast(ctxt,
FLAGS.scheduler_topic,
{ "method": "live_migration",
"args": {"ec2_id": ec2_id,
"dest":dest}})
print 'Finished all procedures. Check that the instance migrated successfully'
print 'by using euca-describe-instances.'
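With the ('instance', InstanceCommands) entry registered in CATEGORIES below, this command is intended to be invoked as nova-manage instance live_migration <ec2_id> <dest>. A minimal sketch of the equivalent direct call, using a hypothetical instance ID and destination host that are not taken from this commit:
# Sketch only: 'i-00000001' and 'node02' are hypothetical placeholder values.
# Roughly equivalent to: nova-manage instance live_migration i-00000001 node02
commands = InstanceCommands()
commands.live_migration('i-00000001', 'node02')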
# this class is created by masumotok
class HostCommands(object):
"""Class for mangaging host(physical nodes)."""
def list(self):
"""describe host list."""
# to supress msg: No handlers could be found for logger "amqplib"
logging.basicConfig()
host_refs = db.host_get_all(context.get_admin_context())
for host_ref in host_refs:
print host_ref['name']
def show(self, host):
"""describe cpu/memory/hdd info for host."""
# to supress msg: No handlers could be found for logger "amqplib"
logging.basicConfig()
result = rpc.call(context.get_admin_context(),
FLAGS.scheduler_topic,
{"method": "show_host_resource",
"args": {"host": host}})
# checking the result message format is necessary; this will be done
# when this feature is included in the API.
if not isinstance(result, dict):
print 'An unexpected error occurred'
elif not result['ret']:
print '%s' % result['msg']
else:
cpu = result['phy_resource']['cpu']
mem = result['phy_resource']['memory_mb']
hdd = result['phy_resource']['hdd_gb']
print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
print '%s\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
for p_id, val in result['usage'].items():
print '%s\t%s\t\t%s\t%s\t%s' % (host,
p_id,
val['cpu'],
val['memory_mb'],
val['hdd_gb'])
def has_keys(self, dic, keys):
not_found = [key for key in keys if key not in dic]
return (len(not_found) == 0, not_found)
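For reference, a sketch of the reply that show() expects back from the scheduler; the shape matches the dictionaries returned by SchedulerManager.show_host_resource later in this diff, while the numbers and the 'proj1' project ID are made-up placeholders:
# Hypothetical reply parsed by show(); all values are placeholders.
result = {'ret': True,
          'phy_resource': {'cpu': 8, 'memory_mb': 16384, 'hdd_gb': 500},
          'usage': {'proj1': {'cpu': 2, 'memory_mb': 2048, 'hdd_gb': 20}}}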
# modified by masumotok
#CATEGORIES = [
# ('user', UserCommands),
# ('project', ProjectCommands),
# ('role', RoleCommands),
# ('shell', ShellCommands),
# ('vpn', VpnCommands),
# ('floating', FloatingIpCommands),
# ('network', NetworkCommands)]
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -431,8 +548,9 @@ CATEGORIES = [
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands)]
('network', NetworkCommands),
('instance', InstanceCommands),
('host', HostCommands)]
def lazy_match(name, key_value_tuples):
"""Finds all objects that have a key that case insensitively contains

View File

@@ -29,6 +29,10 @@ from nova import flags
from nova import manager
from nova import rpc
from nova import utils
# 3 modules are added by masumotok
from nova import exception
from nova.api.ec2 import cloud
from nova.compute import power_state
FLAGS = flags.FLAGS
flags.DEFINE_string('scheduler_driver',
@@ -66,3 +70,106 @@ class SchedulerManager(manager.Manager):
{"method": method,
"args": kwargs})
logging.debug("Casting to %s %s for %s", topic, host, method)
# created by masumotok
def live_migration(self, context, ec2_id, dest):
""" live migration method"""
# 1. get instance id
internal_id = cloud.ec2_id_to_internal_id(ec2_id)
instance_ref = db.instance_get_by_internal_id(context, internal_id)
instance_id = instance_ref['id']
# 2. check that the destination host still has enough capacity
if not self.has_enough_resource(context, instance_id, dest):
return False
# 3. change instance_state
db.instance_set_state(context,
instance_id,
power_state.PAUSED,
'migrating')
# 4. request live migration
host = instance_ref['host']
rpc.cast(context,
db.queue_get_for(context, FLAGS.compute_topic, host),
{"method": 'live_migration',
"args": {'instance_id': instance_id,
'dest': dest}})
return True
# this method is created by masumotok
def has_enough_resource(self, context, instance_id, dest):
# get instance information
instance_ref = db.instance_get(context, instance_id)
ec2_id = instance_ref['hostname']
vcpus = instance_ref['vcpus']
mem = instance_ref['memory_mb']
hdd = instance_ref['local_gb']
# get host information
host_ref = db.host_get_by_name(context, dest)
total_cpu = int(host_ref['cpu'])
total_mem = int(host_ref['memory_mb'])
total_hdd = int(host_ref['hdd_gb'])
instances_ref = db.instance_get_all_by_host(context, dest)
for i_ref in instances_ref:
total_cpu -= int(i_ref['vcpus'])
total_mem -= int(i_ref['memory_mb'])
total_hdd -= int(i_ref['local_gb'])
# check whether the host has enough resources
logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' %
(dest, total_cpu, total_mem, total_hdd))
logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' %
(ec2_id, vcpus, mem, hdd))
if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd:
logging.debug('%s does not have enough resources for %s' %
(dest, ec2_id))
return False
logging.debug('%s has enough resources for %s' % (dest, ec2_id))
return True
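A worked example of the capacity check above, with made-up numbers: an 8-vCPU / 16384 MB / 500 GB destination already running one 2-vCPU / 4096 MB / 40 GB instance is asked to receive a 4-vCPU / 8192 MB / 80 GB guest. Note the comparison is strict, so a guest that exactly fills the remainder is rejected:
# Made-up capacities; mirrors the subtraction and strict comparison above.
total_cpu, total_mem, total_hdd = 8, 16384, 500
for used_cpu, used_mem, used_hdd in [(2, 4096, 40)]:  # instances already on dest
    total_cpu -= used_cpu
    total_mem -= used_mem
    total_hdd -= used_hdd
vcpus, mem, hdd = 4, 8192, 80  # requested guest
ok = not (total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd)
print(ok)  # True: 6 > 4, 12288 > 8192, 460 > 80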
# this method is created by masumotok
def show_host_resource(self, context, host, *args):
""" show the physical/usage resource given by hosts."""
try:
host_ref = db.host_get_by_name(context, host)
except exception.NotFound:
return {'ret': False, 'msg': 'No such Host'}
except:
raise
# get physical resource information
h_resource = {'cpu': host_ref['cpu'],
'memory_mb': host_ref['memory_mb'],
'hdd_gb': host_ref['hdd_gb']}
# get usage resource information
u_resource = {}
instances_ref = db.instance_get_all_by_host(context, host_ref['name'])
if 0 == len(instances_ref):
return {'ret': True, 'phy_resource': h_resource, 'usage': {}}
project_ids = [i['project_id'] for i in instances_ref]
project_ids = list(set(project_ids))
for p_id in project_ids:
cpu = db.instance_get_vcpu_sum_by_host_and_project(context,
host,
p_id)
mem = db.instance_get_memory_sum_by_host_and_project(context,
host,
p_id)
hdd = db.instance_get_disk_sum_by_host_and_project(context,
host,
p_id)
u_resource[p_id] = {'cpu': cpu, 'memory_mb': mem, 'hdd_gb': hdd}
return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource}
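The per-project usage block boils down to summing vcpus/memory/disk per project over the instances on the host; a small self-contained sketch of that grouping with placeholder rows (the real code delegates to db.instance_get_*_sum_by_host_and_project helpers, whose definitions are not shown in this diff):
# Placeholder instance rows; builds u_resource the same way, grouped per project.
instances = [{'project_id': 'projA', 'vcpus': 2, 'memory_mb': 2048, 'local_gb': 20},
             {'project_id': 'projA', 'vcpus': 1, 'memory_mb': 1024, 'local_gb': 10},
             {'project_id': 'projB', 'vcpus': 4, 'memory_mb': 4096, 'local_gb': 40}]
u_resource = {}
for i in instances:
    u = u_resource.setdefault(i['project_id'], {'cpu': 0, 'memory_mb': 0, 'hdd_gb': 0})
    u['cpu'] += i['vcpus']
    u['memory_mb'] += i['memory_mb']
    u['hdd_gb'] += i['local_gb']
print(u_resource)  # {'projA': {'cpu': 3, 'memory_mb': 3072, 'hdd_gb': 30}, 'projB': ...}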

View File

@@ -72,6 +72,14 @@ class Service(object, service.Service):
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
# this try/except block is added by masumotok
try:
host_ref = db.host_get_by_name(ctxt, self.host)
except exception.NotFound:
host_ref = db.host_create(ctxt, {'name': self.host})
host_ref = self._update_host_ref(ctxt, host_ref)
try:
service_ref = db.service_get_by_args(ctxt,
self.host,
@@ -109,6 +117,20 @@ class Service(object, service.Service):
'report_count': 0})
self.service_id = service_ref['id']
# created by masumotok
def _update_host_ref(self, context, host_ref):
"""Record the compute host's CPU/memory/disk capacity in the host table."""
if 'ComputeManager' in self.manager_class_name:
cpu = self.manager.get_cpu_number()
memory_mb = self.manager.get_mem_size()
hdd_gb = self.manager.get_hdd_size()
db.host_update(context,
host_ref['id'],
{'cpu': cpu,
'memory_mb': memory_mb,
'hdd_gb': hdd_gb})
return host_ref
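For context, a sketch of the values this update writes, assuming hypothetical numbers returned by the manager's get_cpu_number()/get_mem_size()/get_hdd_size() helpers (expected on the compute manager and not shown in this diff):
# Hypothetical values; shows the fields passed to db.host_update().
host_values = {'cpu': 8,            # get_cpu_number()
               'memory_mb': 16384,  # get_mem_size()
               'hdd_gb': 500}       # get_hdd_size()
# db.host_update(context, host_ref['id'], host_values)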
def __getattr__(self, key):
manager = self.__dict__.get('manager', None)
return getattr(manager, key)

View File

@@ -133,9 +133,16 @@ def runthis(prompt, cmd, check_exit_code=True):
def generate_uid(topic, size=8):
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
# modified by masumotok
#characters = '01234567890abcdefghijklmnopqrstuvwxyz'
#choices = [random.choice(characters) for x in xrange(size)]
#return '%s-%s' % (topic, ''.join(choices))
if topic == "i":
return random.randint(0, 2 ** 28 - 1)
else:
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))
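The behavioural change is easiest to see side by side: for topic 'i' (instance IDs), generate_uid() now returns a bare random integer in [0, 2**28), while every other topic keeps the old 'topic-suffix' string form. A quick illustration; outputs are random, so the shown values are only examples:
# Illustration only; the printed values are made up, real output is random.
from nova import utils
print(utils.generate_uid('i'))    # e.g. 204819382  (int in [0, 2**28))
print(utils.generate_uid('vol'))  # e.g. 'vol-3kf82abc' (unchanged string form)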
def generate_mac():

View File

@@ -25,6 +25,7 @@ from sphinx.setup_command import BuildDoc
from nova.utils import parse_mailmap, str_dict_replace
class local_BuildDoc(BuildDoc):
def run(self):
for builder in ['html', 'man']:
@@ -54,8 +55,8 @@ setup(name='nova',
author='OpenStack',
author_email='nova@lists.launchpad.net',
url='http://www.openstack.org/',
cmdclass={ 'sdist': local_sdist,
'build_sphinx' : local_BuildDoc },
cmdclass={'sdist': local_sdist,
'build_sphinx': local_BuildDoc},
packages=find_packages(exclude=['bin', 'smoketests']),
include_package_data=True,
scripts=['bin/nova-api',