scheduler host_manager needs service for filters
The distributed scheduler wasn't checking service_is_up() or service['disabled'] because the filters didn't have access to the service record. Fixed both. Since the EC2 API also uses service_down_time, I moved service_is_up() into utils and made the EC2 API use it.

Change-Id: I0321844a47031b2de4d8738e032a4634edd1e945
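The diff below shows the callers switching to utils.service_is_up() but not the relocated helper itself. As a rough sketch only: the moved function presumably compares the service's last check-in timestamp against the new service_down_time flag. The variable names and the timedelta comparison here are illustrative assumptions, not the committed code.

    import datetime

    from nova import flags

    FLAGS = flags.FLAGS


    def service_is_up(service):
        """Check whether a service is alive based on its last check-in."""
        # Assumes `service` is a DB row with naive-UTC
        # updated_at/created_at timestamps.
        last_heartbeat = service['updated_at'] or service['created_at']
        elapsed = datetime.datetime.utcnow() - last_heartbeat
        # "Up" means it checked in within the last service_down_time seconds.
        return abs(elapsed) <= datetime.timedelta(
                seconds=FLAGS.service_down_time)

Centralizing the check this way gives the simple and VSA schedulers and the EC2 API one shared definition of "up", and it is why the tests below stub utils.service_is_up rather than patching each scheduler instance.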
@@ -464,3 +464,6 @@ DEFINE_integer('zombie_instance_updated_at_window', 172800,
                 'being cleaned up.')
 
 DEFINE_boolean('allow_ec2_admin_api', False, 'Enable/Disable EC2 Admin API')
+
+DEFINE_integer('service_down_time', 60,
+               'maximum time since last check-in for up service')
@@ -26,6 +26,7 @@ from nova import flags
 from nova import exception
 from nova.scheduler import driver
 from nova.scheduler import chance
+from nova import utils
 
 FLAGS = flags.FLAGS
 flags.DEFINE_integer("max_cores", 16,
@@ -57,7 +58,7 @@ class SimpleScheduler(chance.ChanceScheduler):
 
         if host and context.is_admin:
             service = db.service_get_by_args(elevated, host, 'nova-compute')
-            if not self.service_is_up(service):
+            if not utils.service_is_up(service):
                 raise exception.WillNotSchedule(host=host)
             return host
 
@@ -79,7 +80,7 @@ class SimpleScheduler(chance.ChanceScheduler):
                     instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                 msg = _("Not enough allocatable CPU cores remaining")
                 raise exception.NoValidHost(reason=msg)
-            if self.service_is_up(service):
+            if utils.service_is_up(service) and not service['disabled']:
                 return service['host']
         msg = _("Is the appropriate service running?")
         raise exception.NoValidHost(reason=msg)
@@ -120,7 +121,7 @@ class SimpleScheduler(chance.ChanceScheduler):
         zone, _x, host = availability_zone.partition(':')
         if host and context.is_admin:
             service = db.service_get_by_args(elevated, host, 'nova-volume')
-            if not self.service_is_up(service):
+            if not utils.service_is_up(service):
                 raise exception.WillNotSchedule(host=host)
             driver.cast_to_volume_host(context, host, 'create_volume',
                     volume_id=volume_id, **_kwargs)
@@ -135,7 +136,7 @@ class SimpleScheduler(chance.ChanceScheduler):
             if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                 msg = _("Not enough allocatable volume gigabytes remaining")
                 raise exception.NoValidHost(reason=msg)
-            if self.service_is_up(service):
+            if utils.service_is_up(service) and not service['disabled']:
                 driver.cast_to_volume_host(context, service['host'],
                         'create_volume', volume_id=volume_id, **_kwargs)
                 return None
@@ -215,7 +215,7 @@ class VsaScheduler(simple.SimpleScheduler):
         zone, _x, host = availability_zone.partition(':')
         service = db.service_get_by_args(context.elevated(), host,
                                          'nova-volume')
-        if not self.service_is_up(service):
+        if service['disabled'] or not utils.service_is_up(service):
             raise exception.WillNotSchedule(host=host)
 
         return host
@@ -197,7 +197,7 @@ class VsaSchedulerTestCase(test.TestCase):
             scheduled_volume = {'id': volume_id, 'host': values['host']}
 
     def _fake_service_get_by_args(self, context, host, binary):
-        return "service"
+        return {'host': 'fake_host', 'disabled': False}
 
     def _fake_service_is_up_True(self, service):
         return True
@@ -386,7 +386,7 @@ class VsaSchedulerTestCase(test.TestCase):
 
         self.stubs.Set(nova.db,
                 'service_get_by_args', self._fake_service_get_by_args)
-        self.stubs.Set(self.sched,
+        self.stubs.Set(utils,
                 'service_is_up', self._fake_service_is_up_False)
 
         self.assertRaises(exception.WillNotSchedule,
@@ -395,7 +395,7 @@ class VsaSchedulerTestCase(test.TestCase):
                           request_spec,
                           availability_zone="nova:host_5")
 
-        self.stubs.Set(self.sched,
+        self.stubs.Set(utils,
                 'service_is_up', self._fake_service_is_up_True)
 
         self.sched.schedule_create_volumes(self.context,
@@ -462,7 +462,7 @@ class VsaSchedulerTestCase(test.TestCase):
         self.stubs.Set(nova.db, 'volume_get', _fake_volume_get_az)
         self.stubs.Set(nova.db,
                 'service_get_by_args', self._fake_service_get_by_args)
-        self.stubs.Set(self.sched,
+        self.stubs.Set(utils,
                 'service_is_up', self._fake_service_is_up_True)
 
         self.sched.schedule_create_volume(self.context,