Merged trunk.
@@ -263,6 +263,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
 DEFINE_string('sql_connection',
               'sqlite:///$state_path/nova.sqlite',
               'connection string for sql database')
+DEFINE_string('sql_idle_timeout',
+              '3600',
+              'timeout for idle sql database connections')
+DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
+DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
 
 DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
               'Manager for compute')
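The two new integer flags suggest a bounded reconnect loop around the SQL connection. A minimal sketch of how values like sql_max_retries=12 and sql_retry_interval=10 could be consumed; the connect_with_retries helper and the bare connect callable are illustrative assumptions, not code from this commit:

    import time

    def connect_with_retries(connect, max_retries=12, retry_interval=10):
        # Keep calling connect() until it succeeds; wait retry_interval
        # seconds between attempts and give up after max_retries failures.
        for attempt in range(1, max_retries + 1):
            try:
                return connect()
            except Exception:
                if attempt == max_retries:
                    raise
                time.sleep(retry_interval)

In practice the two numbers would be read from FLAGS rather than hard-coded as defaults.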
@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
     def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest running instances."""
         instance_ref = db.instance_get(context, instance_id)
+        if instance_ref['availability_zone'] and context.is_admin:
+            zone, _x, host = instance_ref['availability_zone'].partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-compute')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule("Host %s is not alive" % host)
+
+            # TODO(vish): this probably belongs in the manager, if we
+            #             can generalize this somehow
+            now = datetime.datetime.utcnow()
+            db.instance_update(context, instance_id, {'host': host,
+                                                      'scheduled_at': now})
+            return host
         results = db.service_get_all_compute_sorted(context)
         for result in results:
             (service, instance_cores) = result
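The zone:host form handled above comes down to str.partition on ':'. A short illustration using the 'nova:host1' value the updated tests pass as availability_zone:

    # 'nova:host1' splits into an explicit zone and host.
    zone, _sep, host = 'nova:host1'.partition(':')
    assert (zone, host) == ('nova', 'host1')

    # Without a ':' the host part comes back empty, so a bare zone name
    # does not pin the instance to a particular compute host.
    zone, _sep, host = 'nova'.partition(':')
    assert (zone, host) == ('nova', '')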
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
     def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest volumes."""
         volume_ref = db.volume_get(context, volume_id)
+        if (':' in volume_ref['availability_zone']) and context.is_admin:
+            zone, _x, host = volume_ref['availability_zone'].partition(':')
+            service = db.service_get_by_args(context.elevated(), host,
+                                             'nova-volume')
+            if not self.service_is_up(service):
+                raise driver.WillNotSchedule("Host %s not available" % host)
+
+            # TODO(vish): this probably belongs in the manager, if we
+            #             can generalize this somehow
+            now = datetime.datetime.utcnow()
+            db.volume_update(context, volume_id, {'host': host,
+                                                  'scheduled_at': now})
+            return host
         results = db.service_get_all_volume_sorted(context)
         for result in results:
             (service, volume_gigabytes) = result
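Note that the volume branch guards on ':' being present in the zone string, while the compute branch above only checks that the zone is truthy. With the plain 'test' zone the volume fixture sets later in this commit, the pinning branch is skipped; a quick check of that assumption:

    az = 'test'  # value the test fixture assigns to vol['availability_zone']
    assert ':' not in az  # so schedule_create_volume falls through
    zone, _sep, host = az.partition(':')
    assert host == ''     # partition would yield no explicit host anyway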
@@ -19,6 +19,8 @@
 Tests For Scheduler
 """
 
+import datetime
+
 from nova import context
 from nova import db
 from nova import flags
@@ -95,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase):
         self.manager.delete_user(self.user)
         self.manager.delete_project(self.project)
 
-    def _create_instance(self):
+    def _create_instance(self, **kwargs):
         """Create a test instance"""
         inst = {}
         inst['image_id'] = 'ami-test'
@@ -106,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase):
         inst['mac_address'] = utils.generate_mac()
         inst['ami_launch_index'] = 0
         inst['vcpus'] = 1
+        inst['availability_zone'] = kwargs.get('availability_zone', None)
         return db.instance_create(self.context, inst)['id']
 
     def _create_volume(self):
@@ -114,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase):
         vol['image_id'] = 'ami-test'
         vol['reservation_id'] = 'r-fakeres'
         vol['size'] = 1
+        vol['availability_zone'] = 'test'
         return db.volume_create(self.context, vol)['id']
 
-    def test_hosts_are_up(self):
+    def test_doesnt_report_disabled_hosts_as_up(self):
+        """Ensures driver doesn't find hosts before they are enabled"""
+        # NOTE(vish): constructing service without create method
+        #             because we are going to use it without queue
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        compute2 = service.Service('host2',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute2.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+        db.service_update(self.context, s1['id'], {'disabled': True})
+        db.service_update(self.context, s2['id'], {'disabled': True})
+        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+        self.assertEqual(0, len(hosts))
+        compute1.kill()
+        compute2.kill()
+
+    def test_reports_enabled_hosts_as_up(self):
         """Ensures driver can find the hosts that are up"""
         # NOTE(vish): constructing service without create method
         #             because we are going to use it without queue
@@ -131,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase):
                                    FLAGS.compute_manager)
         compute2.start()
         hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
-        self.assertEqual(len(hosts), 2)
+        self.assertEqual(2, len(hosts))
         compute1.kill()
         compute2.kill()
 
@@ -158,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase):
         compute1.kill()
         compute2.kill()
 
+    def test_specific_host_gets_instance(self):
+        """Ensures if you set availability_zone it launches on that zone"""
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        compute2 = service.Service('host2',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute2.start()
+        instance_id1 = self._create_instance()
+        compute1.run_instance(self.context, instance_id1)
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        host = self.scheduler.driver.schedule_run_instance(self.context,
+                                                           instance_id2)
+        self.assertEqual('host1', host)
+        compute1.terminate_instance(self.context, instance_id1)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+        compute2.kill()
+
+    def test_wont_sechedule_if_specified_host_is_down(self):
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        now = datetime.datetime.utcnow()
+        delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+        past = now - delta
+        db.service_update(self.context, s1['id'], {'updated_at': past})
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        self.assertRaises(driver.WillNotSchedule,
+                          self.scheduler.driver.schedule_run_instance,
+                          self.context,
+                          instance_id2)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+
+    def test_will_schedule_on_disabled_host_if_specified(self):
+        compute1 = service.Service('host1',
+                                   'nova-compute',
+                                   'compute',
+                                   FLAGS.compute_manager)
+        compute1.start()
+        s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+        db.service_update(self.context, s1['id'], {'disabled': True})
+        instance_id2 = self._create_instance(availability_zone='nova:host1')
+        host = self.scheduler.driver.schedule_run_instance(self.context,
+                                                           instance_id2)
+        self.assertEqual('host1', host)
+        db.instance_destroy(self.context, instance_id2)
+        compute1.kill()
+
     def test_too_many_cores(self):
         """Ensures we don't go over max cores"""
         compute1 = service.Service('host1',
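test_wont_sechedule_if_specified_host_is_down marks the service as dead by backdating its updated_at heartbeat by two service_down_time windows. A rough sketch of the liveness comparison that implies; the real driver.Scheduler.service_is_up may differ, and the 60-second default here is only an assumed value for FLAGS.service_down_time:

    import datetime

    def service_is_up(service, service_down_time=60):
        # A service counts as up if its last report falls inside the window.
        last_heard = service['updated_at']
        elapsed = datetime.datetime.utcnow() - last_heard
        return elapsed <= datetime.timedelta(seconds=service_down_time)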
@@ -249,15 +249,16 @@ class XenAPIVMTestCase(test.TestCase):
 
     def _create_instance(self):
         """Creates and spawns a test instance"""
-        values = {'name': 1, 'id': 1,
-                  'project_id': self.project.id,
-                  'user_id': self.user.id,
-                  'image_id': 1,
-                  'kernel_id': 2,
-                  'ramdisk_id': 3,
-                  'instance_type': 'm1.large',
-                  'mac_address': 'aa:bb:cc:dd:ee:ff'
-                  }
+        values = {
+            'name': 1,
+            'id': 1,
+            'project_id': self.project.id,
+            'user_id': self.user.id,
+            'image_id': 1,
+            'kernel_id': 2,
+            'ramdisk_id': 3,
+            'instance_type': 'm1.large',
+            'mac_address': 'aa:bb:cc:dd:ee:ff'}
         instance = db.instance_create(values)
         self.conn.spawn(instance)
         return instance