Disable compute fanout to scheduler

Now that nothing uses publish_service_capabilities, it can be
disabled. It is not removed, however, so that compute rpcapi backwards
compatibility is preserved.

Part of bp no-compute-fanout-to-scheduler.

Change-Id: I80c49c46138fd6ee89cb08ffbbced72ada4de72e
Author: Joe Gordon
Date:   2013-08-13 17:00:43 -07:00
Parent: 5ce74eb546
Commit: 674ef05b0a
7 changed files with 8 additions and 31 deletions
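
The change follows the usual RPC deprecation pattern: every in-tree caller of the fanout cast goes away, while the server-side handler stays so that services still speaking the 2.x compute RPC API are unaffected. A minimal, self-contained sketch of that pattern (illustrative names only, not Nova's actual classes):

# Sketch only: the deprecated RPC method is left callable for already-deployed
# clients, but nothing in the codebase casts to it any more. Actual removal
# has to wait for the next major RPC version, which may break compatibility.

class ComputeManagerSketch:
    """Server side: the handler stays, marked deprecated."""

    def publish_service_capabilities(self, context):
        # Deprecated and unused in-tree; drop when the RPC API moves to 3.0.
        pass


class OldClientSketch:
    """Stands in for an already-deployed client that still issues the call."""

    def __init__(self, server):
        self.server = server

    def publish_service_capabilities(self, context):
        # In Nova this would be a fanout cast over the message bus.
        self.server.publish_service_capabilities(context)


# Old clients keep working even though no new code issues this call.
OldClientSketch(ComputeManagerSketch()).publish_service_capabilities(None)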


@@ -762,7 +762,6 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.filter_defer_apply_off()
self._report_driver_status(context)
self.publish_service_capabilities(context)
def pre_start_hook(self):
"""After the service is initialized, but before we fully bring


@@ -787,9 +787,6 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
instance=instance_p),
topic=_compute_topic(self.topic, ctxt, None, instance))
def publish_service_capabilities(self, ctxt):
self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
def soft_delete_instance(self, ctxt, instance, reservations=None):
if self.can_send_version('2.35'):
version = '2.35'
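
The context lines above also show the can_send_version() idiom the proxy uses to stay compatible with older peers: prefer the newest message version the receiver accepts and fall back otherwise. A self-contained sketch of that idiom (the '2.0' fallback below is an assumed value for illustration, not taken from Nova):

class FakeProxySketch:
    """Stand-in for an RPC proxy; only the version check is modelled."""

    def __init__(self, peer_version):
        self.peer_version = peer_version

    def can_send_version(self, version):
        # Compare dotted versions numerically, e.g. '2.35' is newer than '2.9'.
        def as_tuple(v):
            return tuple(int(part) for part in v.split('.'))
        return as_tuple(self.peer_version) >= as_tuple(version)


def pick_soft_delete_version(proxy):
    # Mirrors the hunk above: use 2.35 when the peer supports it.
    return '2.35' if proxy.can_send_version('2.35') else '2.0'


print(pick_soft_delete_version(FakeProxySketch('2.35')))  # -> 2.35
print(pick_soft_delete_version(FakeProxySketch('2.27')))  # -> 2.0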


@@ -152,13 +152,14 @@ class SchedulerDependentManager(Manager):
capabilities = [capabilities]
self.last_capabilities = capabilities
@periodic_task.periodic_task
def publish_service_capabilities(self, context):
"""Pass data back to the scheduler.
Called at a periodic interval. And also called via rpc soon after
the start of the scheduler.
"""
#NOTE(jogo): this is now deprecated, unused and can be removed in
#V3.0 of compute RPCAPI
if self.last_capabilities:
LOG.debug(_('Notifying Schedulers of capabilities ...'))
self.scheduler_rpcapi.update_service_capabilities(context,
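
The method touched above is wired up with the @periodic_task.periodic_task decorator. For context, a rough, self-contained sketch of how such a decorator-plus-runner scheme generally works (this is not the nova.openstack.common implementation, only the general shape):

def periodic_task(func):
    # Tag the method so the service's ticker can discover it.
    func._periodic_task = True
    return func


class ManagerSketch:
    def __init__(self):
        self.last_capabilities = None

    @periodic_task
    def publish_service_capabilities(self, context):
        if self.last_capabilities:
            print('Notifying Schedulers of capabilities ...')

    def run_periodic_tasks(self, context):
        # Called on a timer by the service; runs every tagged method.
        for name in dir(self):
            method = getattr(self, name)
            if callable(method) and getattr(method, '_periodic_task', False):
                method(context)


m = ManagerSketch()
m.last_capabilities = [{'free_ram_mb': 1024}]
m.run_periodic_tasks(context=None)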


@@ -29,7 +29,6 @@ from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova.conductor.tasks import live_migrate
import nova.context
from nova import exception
from nova import manager
from nova.objects import instance as instance_obj
@@ -68,16 +67,12 @@ class SchedulerManager(manager.Manager):
super(SchedulerManager, self).__init__(service_name='scheduler',
*args, **kwargs)
def post_start_hook(self):
"""After we start up and can receive messages via RPC, tell all
compute nodes to send us their capabilities.
"""
ctxt = nova.context.get_admin_context()
compute_rpcapi.ComputeAPI().publish_service_capabilities(ctxt)
def update_service_capabilities(self, context, service_name,
host, capabilities):
"""Process a capability update from a service node."""
#NOTE(jogo) This is deprecated, but is used by the deprecated
# publish_service_capabilities call. So this can begin its removal
# process once publish_service_capabilities is removed.
if not isinstance(capabilities, list):
capabilities = [capabilities]
for capability in capabilities:
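
The removed post_start_hook above is what produced the fanout: at scheduler start-up it cast publish_service_capabilities to every compute service listening on the topic, so the message count grew with the number of compute nodes. A self-contained sketch of fanout-cast semantics (an in-memory stand-in, not Nova's rpc implementation):

class FakeFanoutBus:
    """In-memory stand-in for the message bus fanout exchange."""

    def __init__(self):
        self.listeners = []          # one entry per compute service

    def fanout_cast(self, ctxt, msg):
        # Every listener gets its own copy of the message.
        for listener in self.listeners:
            listener(ctxt, msg)


bus = FakeFanoutBus()
for i in range(3):
    bus.listeners.append(
        lambda ctxt, msg, i=i: print('compute-%d handles %s' % (i, msg['method'])))

# Roughly what the removed hook did once, at scheduler start-up:
bus.fanout_cast(None, {'method': 'publish_service_capabilities'})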


@@ -125,6 +125,9 @@ class SchedulerAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def update_service_capabilities(self, ctxt, service_name, host,
capabilities):
#NOTE(jogo) This is deprecated, but is used by the deprecated
# publish_service_capabilities call. So this can begin its removal
# process once publish_service_capabilities is removed.
self.fanout_cast(ctxt, self.make_msg('update_service_capabilities',
service_name=service_name, host=host,
capabilities=capabilities),


@@ -99,10 +99,6 @@ FAKE_IMAGE_REF = 'fake-image-ref'
NODENAME = 'fakenode1'
def nop_report_driver_status(self):
pass
def get_primitive_instance_by_uuid(context, instance_uuid):
"""
Helper method to get an instance and then convert it to
@@ -4371,9 +4367,6 @@ class ComputeTestCase(BaseTestCase):
def test_run_kill_vm(self):
# Detect when a vm is terminated behind the scenes.
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)


@@ -172,8 +172,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
mox.IsA(instance_obj.Instance))
if defer_iptables_apply:
self.compute.driver.filter_defer_apply_off()
self.compute._report_driver_status(fake_context)
self.compute.publish_service_capabilities(fake_context)
self.mox.StubOutWithMock(self.compute.driver, 'init_host')
self.mox.StubOutWithMock(self.compute.driver,
@@ -186,10 +184,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
'_destroy_evacuated_instances')
self.mox.StubOutWithMock(self.compute,
'_init_instance')
self.mox.StubOutWithMock(self.compute,
'_report_driver_status')
self.mox.StubOutWithMock(self.compute,
'publish_service_capabilities')
# Test with defer_iptables_apply
self.flags(defer_iptables_apply=True)
@@ -229,8 +223,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(self.compute, 'init_virt_events')
self.mox.StubOutWithMock(self.compute, '_get_instances_on_driver')
self.mox.StubOutWithMock(self.compute, '_init_instance')
self.mox.StubOutWithMock(self.compute, '_report_driver_status')
self.mox.StubOutWithMock(self.compute, 'publish_service_capabilities')
self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
self.compute.driver.init_host(host=our_host)
@@ -251,9 +243,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
self.compute.driver.destroy(deleted_instance,
mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
self.compute._report_driver_status(fake_context)
self.compute.publish_service_capabilities(fake_context)
self.mox.ReplayAll()
self.compute.init_host()
# tearDown() uses context.get_admin_context(), so we have