scheduler host_manager needs service for filters

The distributed scheduler isn't checking service_is_up() or
service['disabled'], because the filters don't have access to the service.

Fixed both.  Since the ec2 API also uses service_down_time, I moved
service_is_up() into utils and made ec2 use it.

Change-Id: I0321844a47031b2de4d8738e032a4634edd1e945
commit e9c35d861c
parent 575b780ca9
Author: Chris Behrens
Date:   2012-01-19 21:36:42 -08:00

4 changed files with 13 additions and 9 deletions
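
Note: the body of the moved helper is not shown in this view. As a rough
sketch, service_is_up() in nova/utils.py presumably keeps the heartbeat
pattern the scheduler used before the move: compare the service's last
check-in against FLAGS.service_down_time (the flag added in the first hunk
below). The exact body here is an assumption, not part of this diff:

import datetime

from nova import flags

FLAGS = flags.FLAGS


def service_is_up(service):
    """Check whether a service is alive based on its last check-in."""
    last_heartbeat = service['updated_at'] or service['created_at']
    # A service counts as up if its heartbeat is newer than
    # FLAGS.service_down_time seconds (60 by default).  nova.utils wraps
    # utcnow() so tests can override it; plain datetime is used here.
    elapsed = datetime.datetime.utcnow() - last_heartbeat
    return elapsed < datetime.timedelta(seconds=FLAGS.service_down_time)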

@@ -464,3 +464,6 @@ DEFINE_integer('zombie_instance_updated_at_window', 172800,
                'being cleaned up.')
 DEFINE_boolean('allow_ec2_admin_api', False, 'Enable/Disable EC2 Admin API')
+DEFINE_integer('service_down_time', 60,
+               'maximum time since last check-in for up service')

@@ -26,6 +26,7 @@ from nova import flags
 from nova import exception
 from nova.scheduler import driver
 from nova.scheduler import chance
+from nova import utils

 FLAGS = flags.FLAGS
 flags.DEFINE_integer("max_cores", 16,
@@ -57,7 +58,7 @@ class SimpleScheduler(chance.ChanceScheduler):
         if host and context.is_admin:
             service = db.service_get_by_args(elevated, host, 'nova-compute')
-            if not self.service_is_up(service):
+            if not utils.service_is_up(service):
                 raise exception.WillNotSchedule(host=host)
             return host
@@ -79,7 +80,7 @@ class SimpleScheduler(chance.ChanceScheduler):
                     instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                 msg = _("Not enough allocatable CPU cores remaining")
                 raise exception.NoValidHost(reason=msg)
-            if self.service_is_up(service):
+            if utils.service_is_up(service) and not service['disabled']:
                 return service['host']
         msg = _("Is the appropriate service running?")
         raise exception.NoValidHost(reason=msg)
@@ -120,7 +121,7 @@ class SimpleScheduler(chance.ChanceScheduler):
         zone, _x, host = availability_zone.partition(':')
         if host and context.is_admin:
             service = db.service_get_by_args(elevated, host, 'nova-volume')
-            if not self.service_is_up(service):
+            if not utils.service_is_up(service):
                 raise exception.WillNotSchedule(host=host)
             driver.cast_to_volume_host(context, host, 'create_volume',
                                        volume_id=volume_id, **_kwargs)
@@ -135,7 +136,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                 msg = _("Not enough allocatable volume gigabytes remaining")
                 raise exception.NoValidHost(reason=msg)
-            if self.service_is_up(service):
+            if utils.service_is_up(service) and not service['disabled']:
                 driver.cast_to_volume_host(context, service['host'],
                         'create_volume', volume_id=volume_id, **_kwargs)
                 return None
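
Both SimpleScheduler paths above now apply the same availability test: a
candidate service must have a recent heartbeat and must not be
administratively disabled. As a standalone illustration (the helper name
_host_is_schedulable is hypothetical; the diff inlines the expression):

from nova import utils


def _host_is_schedulable(service):
    # Mirrors the checks added above: the service must have checked in
    # recently and must not be disabled by an admin.
    return utils.service_is_up(service) and not service['disabled']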

@@ -215,7 +215,7 @@ class VsaScheduler(simple.SimpleScheduler):
         zone, _x, host = availability_zone.partition(':')
         service = db.service_get_by_args(context.elevated(), host,
                                          'nova-volume')
-        if not self.service_is_up(service):
+        if service['disabled'] or not utils.service_is_up(service):
             raise exception.WillNotSchedule(host=host)
         return host

@@ -197,7 +197,7 @@ class VsaSchedulerTestCase(test.TestCase):
         scheduled_volume = {'id': volume_id, 'host': values['host']}

     def _fake_service_get_by_args(self, context, host, binary):
-        return "service"
+        return {'host': 'fake_host', 'disabled': False}

     def _fake_service_is_up_True(self, service):
         return True
@@ -386,7 +386,7 @@ class VsaSchedulerTestCase(test.TestCase):
         self.stubs.Set(nova.db,
                        'service_get_by_args', self._fake_service_get_by_args)
-        self.stubs.Set(self.sched,
+        self.stubs.Set(utils,
                        'service_is_up', self._fake_service_is_up_False)
         self.assertRaises(exception.WillNotSchedule,
@@ -395,7 +395,7 @@ class VsaSchedulerTestCase(test.TestCase):
                           request_spec,
                           availability_zone="nova:host_5")
-        self.stubs.Set(self.sched,
+        self.stubs.Set(utils,
                        'service_is_up', self._fake_service_is_up_True)
         self.sched.schedule_create_volumes(self.context,
@@ -462,7 +462,7 @@ class VsaSchedulerTestCase(test.TestCase):
         self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
         self.stubs.Set(nova.db,
                        'service_get_by_args', self._fake_service_get_by_args)
-        self.stubs.Set(self.sched,
+        self.stubs.Set(utils,
                        'service_is_up', self._fake_service_is_up_True)
         self.sched.schedule_create_volume(self.context,
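
Since service_is_up() is now a module-level function in nova.utils rather
than a method on the scheduler, the tests stub the module instead of the
scheduler instance. A minimal sketch of that pattern (class and test names
are illustrative, not from this diff):

from nova import test
from nova import utils


class ServiceIsUpStubExample(test.TestCase):
    def test_down_service_reports_down(self):
        # Replace the shared helper at module level, just as the VSA
        # scheduler tests above do with stubs.Set(utils, ...).
        self.stubs.Set(utils, 'service_is_up', lambda service: False)
        self.assertFalse(utils.service_is_up({'host': 'fake_host',
                                              'disabled': False}))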