From 09898781656c987afe7019aaa63a68eda142f72e Mon Sep 17 00:00:00 2001
From: Rui Chen
Date: Thu, 26 Feb 2015 17:29:14 +0800
Subject: [PATCH] Scheduler multiple workers support

Multiple process workers support for nova-scheduler.

Since blueprint placement-claims in Pike, the FilterScheduler uses the
Placement service to create resource allocations (claims) against a
resource provider (i.e. compute node) chosen by the scheduler. That
reduces the risk of scheduling collisions when running multiple
schedulers, so this change adds the ability to set multiple scheduler
workers like for the nova-osapi_compute and nova-conductor services.

Co-Authored-By: Matt Riedemann
Change-Id: Ifdcd363d7bc22e73d76d69777483e5aaff4036e3
---
 nova/cmd/scheduler.py                         |  9 ++-
 nova/conf/scheduler.py                        | 11 ++++
 nova/tests/unit/cmd/test_scheduler.py         | 61 +++++++++++++++++++
 ...le-scheduler-workers-3e5ac0d86f436338.yaml | 26 ++++++++
 4 files changed, 106 insertions(+), 1 deletion(-)
 create mode 100644 nova/tests/unit/cmd/test_scheduler.py
 create mode 100644 releasenotes/notes/multiple-scheduler-workers-3e5ac0d86f436338.yaml

diff --git a/nova/cmd/scheduler.py b/nova/cmd/scheduler.py
index 51d5aee4ac8d..dafec005b48e 100644
--- a/nova/cmd/scheduler.py
+++ b/nova/cmd/scheduler.py
@@ -18,6 +18,7 @@
 
 import sys
 
+from oslo_concurrency import processutils
 from oslo_log import log as logging
 from oslo_reports import guru_meditation_report as gmr
 from oslo_reports import opts as gmr_opts
@@ -45,5 +46,11 @@ def main():
 
     server = service.Service.create(binary='nova-scheduler',
                                     topic=scheduler_rpcapi.RPC_TOPIC)
-    service.serve(server)
+    # Determine the number of workers; if not specified in config, default
+    # to ncpu for the FilterScheduler and 1 for everything else.
+    workers = CONF.scheduler.workers
+    if not workers:
+        workers = (processutils.get_worker_count()
+                   if CONF.scheduler.driver == 'filter_scheduler' else 1)
+    service.serve(server, workers=workers)
     service.wait()
diff --git a/nova/conf/scheduler.py b/nova/conf/scheduler.py
index 2cb66e1aa317..9461fa27bc5e 100644
--- a/nova/conf/scheduler.py
+++ b/nova/conf/scheduler.py
@@ -63,6 +63,10 @@ Possible values:
 * You may also set this to the entry point name of a custom scheduler driver,
   but you will be responsible for creating and maintaining it in your
   setup.cfg file.
+
+Related options:
+
+* workers
 """),
     cfg.IntOpt("periodic_task_interval",
         default=60,
@@ -141,6 +145,13 @@
 etc. of the scheduler.
 
 This option is only used by the FilterScheduler; if you use a different
 scheduler, this option has no effect.
+"""),
+    cfg.IntOpt("workers",
+        min=0,
+        help="""
+Number of workers for the nova-scheduler service. The default will be the
+number of CPUs available if using the "filter_scheduler" scheduler driver,
+otherwise the default will be 1.
 """),
 ]
diff --git a/nova/tests/unit/cmd/test_scheduler.py b/nova/tests/unit/cmd/test_scheduler.py
new file mode 100644
index 000000000000..2d5029cb82f4
--- /dev/null
+++ b/nova/tests/unit/cmd/test_scheduler.py
@@ -0,0 +1,61 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import mock
+
+from nova.cmd import scheduler
+from nova import config
+from nova import test
+
+
+# required because otherwise oslo early parse_args dies
+@mock.patch.object(config, 'parse_args', new=lambda *args, **kwargs: None)
+class TestScheduler(test.NoDBTestCase):
+
+    @mock.patch('nova.service.Service.create')
+    @mock.patch('nova.service.serve')
+    @mock.patch('nova.service.wait')
+    @mock.patch('oslo_concurrency.processutils.get_worker_count',
+                return_value=2)
+    def test_workers_defaults(self, get_worker_count, mock_wait, mock_serve,
+                              service_create):
+        scheduler.main()
+        get_worker_count.assert_called_once_with()
+        mock_serve.assert_called_once_with(
+            service_create.return_value, workers=2)
+        mock_wait.assert_called_once_with()
+
+    @mock.patch('nova.service.Service.create')
+    @mock.patch('nova.service.serve')
+    @mock.patch('nova.service.wait')
+    @mock.patch('oslo_concurrency.processutils.get_worker_count')
+    def test_workers_override(self, get_worker_count, mock_wait, mock_serve,
+                              service_create):
+        self.flags(workers=4, group='scheduler')
+        scheduler.main()
+        get_worker_count.assert_not_called()
+        mock_serve.assert_called_once_with(
+            service_create.return_value, workers=4)
+        mock_wait.assert_called_once_with()
+
+    @mock.patch('nova.service.Service.create')
+    @mock.patch('nova.service.serve')
+    @mock.patch('nova.service.wait')
+    @mock.patch('oslo_concurrency.processutils.get_worker_count')
+    def test_workers_caching_scheduler(self, get_worker_count, mock_wait,
+                                       mock_serve, service_create):
+        self.flags(driver='caching_scheduler', group='scheduler')
+        scheduler.main()
+        get_worker_count.assert_not_called()
+        mock_serve.assert_called_once_with(
+            service_create.return_value, workers=1)
+        mock_wait.assert_called_once_with()
diff --git a/releasenotes/notes/multiple-scheduler-workers-3e5ac0d86f436338.yaml b/releasenotes/notes/multiple-scheduler-workers-3e5ac0d86f436338.yaml
new file mode 100644
index 000000000000..58a80e22d297
--- /dev/null
+++ b/releasenotes/notes/multiple-scheduler-workers-3e5ac0d86f436338.yaml
@@ -0,0 +1,26 @@
+---
+features:
+  - |
+    It is now possible to configure multiple *nova-scheduler* workers via the
+    ``[scheduler]workers`` configuration option. By default, the option runs
+    ``ncpu`` workers if using the ``filter_scheduler`` scheduler driver,
+    otherwise the default is 1.
+
+    Since `blueprint placement-claims`_ in Pike, the FilterScheduler
+    uses the Placement service to create resource allocations (claims)
+    against a resource provider (i.e. compute node) chosen by the scheduler.
+    That reduces the risk of scheduling collisions when running multiple
+    schedulers.
+
+    Since other scheduler drivers, like the CachingScheduler, do not
+    use Placement, it is recommended to set workers=1 (default) for those
+    other scheduler drivers.
+
+    .. _blueprint placement-claims: https://specs.openstack.org/openstack/nova-specs/specs/pike/implemented/placement-claims.html
+upgrade:
+  - |
+    The new ``[scheduler]workers`` configuration option defaults to ``ncpu``
+    workers if using the ``filter_scheduler`` scheduler driver. If you are
+    running *nova-scheduler* on the same host as other services, you may want
+    to change this default value, or to otherwise account for running other
+    instances of the *nova-scheduler* service.
\ No newline at end of file
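
For reference, a minimal nova.conf sketch of how an operator could use the
option introduced by this patch. The [scheduler] group, the workers and
driver option names, and the filter_scheduler value come from the change
above; the worker count of 4 is only an illustrative choice, and omitting
workers keeps the default described in the release note:

    [scheduler]
    driver = filter_scheduler
    # Pin the scheduler to a fixed number of workers instead of the default.
    workers = 4

With workers unset, a filter_scheduler deployment on a 4-CPU host would start
4 scheduler worker processes, while any other driver (e.g. caching_scheduler)
would start 1.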