Move setup_instance_group to conductor
During prework for the scheduler split, we identified a call from setup_instance_group to the Nova object InstanceGroup. That prevents the scheduler from being split out as a separate project, because it would leave the scheduler with an external dependency on a Nova object. The implementation here has the conductor make the call before invoking select_destinations, as this method only modifies filter_properties to add group details.

Co-Authored-By: Ed Leafe <ed@leafe.com>
Change-Id: Ib6556c8f6d6e6dcdb65a0bdb1894c66e92b35230
parent 483d49bf62
commit cc9b086f61
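The change touches four areas, shown in the hunks below: the conductor's ComputeTaskManager.build_instances() now derives the group information and stores it in filter_properties before calling the scheduler; the filter scheduler loses its private _setup_instance_group() and instead consumes the precomputed values; the helper itself lands in the scheduler utils module; and the unit tests move accordingly.

One detail worth calling out is serialization. filter_properties crosses the conductor-to-scheduler RPC boundary as JSON, so the set of group hosts built by the conductor arrives at the scheduler as a list and has to be turned back into a set (see the NOTE(sbauza) comment in the scheduler hunk). A minimal sketch of that round trip, using the stdlib json module in place of Nova's jsonutils wrapper:

import json

# Conductor side: setup_instance_group() returned (group_hosts, group_policies),
# which build_instances() folds into filter_properties.
filter_properties = {'group_updated': True,
                     'group_hosts': set(['hostA', 'hostB']),
                     'group_policies': ['anti-affinity']}

# A Python set is not JSON-serializable, so it travels over RPC as a list.
wire = json.dumps({k: (sorted(v) if isinstance(v, set) else v)
                   for k, v in filter_properties.items()})

# Scheduler side: _schedule() sees a list and re-wraps it in a set, mirroring
# the isinstance(filter_properties['group_hosts'], list) check in the diff.
received = json.loads(wire)
if isinstance(received['group_hosts'], list):
    received['group_hosts'] = set(received['group_hosts'])
assert received['group_hosts'] == set(['hostA', 'hostB'])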
@@ -596,6 +596,16 @@ class ComputeTaskManager(base.Base):
         # 2.0 of the RPC API.
         request_spec = scheduler_utils.build_request_spec(context, image,
                                                           instances)
+        # NOTE(sbauza): filter_properties['hints'] can be None
+        hints = filter_properties.get('scheduler_hints', {}) or {}
+        group_hint = hints.get('group')
+        group_hosts = filter_properties.get('group_hosts')
+        group_info = scheduler_utils.setup_instance_group(context, group_hint,
+                                                          group_hosts)
+        if isinstance(group_info, tuple):
+            filter_properties['group_updated'] = True
+            (filter_properties['group_hosts'],
+             filter_properties['group_policies']) = group_info
         # TODO(danms): Remove this in version 2.0 of the RPC API
         if (requested_networks and
                 not isinstance(requested_networks,
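On the scheduler side, the filter scheduler sheds its own copy of this logic. The hunks below drop the now-unused import, remove the _supports_affinity and _supports_anti_affinity probes from __init__, delete _setup_instance_group() outright, and change _schedule() to read the 'group_updated' flag the conductor sets, deserializing the group hosts where the chosen host is recorded.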
@@ -26,7 +26,6 @@ from oslo.config import cfg
 from nova.compute import rpcapi as compute_rpcapi
 from nova import exception
 from nova.i18n import _, _LW
-from nova import objects
 from nova.openstack.common import log as logging
 from nova import rpc
 from nova.scheduler import driver
@@ -60,10 +59,6 @@ class FilterScheduler(driver.Scheduler):
         self.options = scheduler_options.SchedulerOptions()
         self.compute_rpcapi = compute_rpcapi.ComputeAPI()
         self.notifier = rpc.get_notifier('scheduler')
-        self._supports_affinity = scheduler_utils.validate_filter(
-            'ServerGroupAffinityFilter')
-        self._supports_anti_affinity = scheduler_utils.validate_filter(
-            'ServerGroupAntiAffinityFilter')

     # NOTE(alaski): Remove this method when the scheduler rpc interface is
     # bumped to 4.x as it is no longer used.
@@ -216,40 +211,6 @@ class FilterScheduler(driver.Scheduler):
         filter_properties['project_id'] = project_id
         filter_properties['os_type'] = os_type

-    def _setup_instance_group(self, context, filter_properties):
-        """Update filter_properties with server group info.
-
-        :returns: True if filter_properties has been updated, False if not.
-        """
-        scheduler_hints = filter_properties.get('scheduler_hints') or {}
-        group_hint = scheduler_hints.get('group', None)
-        if not group_hint:
-            return False
-
-        group = objects.InstanceGroup.get_by_hint(context, group_hint)
-        policies = set(('anti-affinity', 'affinity'))
-        if not any((policy in policies) for policy in group.policies):
-            return False
-
-        if ('affinity' in group.policies and
-                not self._supports_affinity):
-            msg = _("ServerGroupAffinityFilter not configured")
-            LOG.error(msg)
-            raise exception.NoValidHost(reason=msg)
-        if ('anti-affinity' in group.policies and
-                not self._supports_anti_affinity):
-            msg = _("ServerGroupAntiAffinityFilter not configured")
-            LOG.error(msg)
-            raise exception.NoValidHost(reason=msg)
-
-        filter_properties.setdefault('group_hosts', set())
-        user_hosts = set(filter_properties['group_hosts'])
-        group_hosts = set(group.get_hosts(context))
-        filter_properties['group_hosts'] = user_hosts | group_hosts
-        filter_properties['group_policies'] = group.policies
-
-        return True
-
     def _schedule(self, context, request_spec, filter_properties):
         """Returns a list of hosts that meet the required specs,
         ordered by their fitness.
@@ -259,8 +220,7 @@ class FilterScheduler(driver.Scheduler):
         instance_type = request_spec.get("instance_type", None)
         instance_uuids = request_spec.get("instance_uuids", None)

-        update_group_hosts = self._setup_instance_group(context,
-                                                        filter_properties)
+        update_group_hosts = filter_properties.get('group_updated', False)

         config_options = self._get_configuration_options()

@@ -326,6 +286,12 @@ class FilterScheduler(driver.Scheduler):
             if pci_requests:
                 del instance_properties['pci_requests']
             if update_group_hosts is True:
+                # NOTE(sbauza): Group details are serialized into a list now
+                # that they are populated by the conductor, we need to
+                # deserialize them
+                if isinstance(filter_properties['group_hosts'], list):
+                    filter_properties['group_hosts'] = set(
+                        filter_properties['group_hosts'])
                 filter_properties['group_hosts'].add(chosen_host.obj.host)
         return selected_hosts

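The removed logic re-emerges in the scheduler utils module as a standalone setup_instance_group() function. With no scheduler object to hang cached state on, the validate_filter() results are memoized in the module-level _SUPPORTS_AFFINITY and _SUPPORTS_ANTI_AFFINITY globals, and the function returns a (group_hosts, group_policies) tuple, or None when there is no group hint, instead of mutating filter_properties directly.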
@@ -25,6 +25,7 @@ from nova import db
 from nova import exception
 from nova.i18n import _, _LE, _LW
 from nova import notifications
+from nova import objects
 from nova.objects import base as obj_base
 from nova.openstack.common import log as logging
 from nova import rpc
@@ -41,6 +42,8 @@ scheduler_opts = [
 CONF = cfg.CONF
 CONF.register_opts(scheduler_opts)

+CONF.import_opt('scheduler_default_filters', 'nova.scheduler.host_manager')
+

 def build_request_spec(ctxt, image, instances, instance_type=None):
     """Build a request_spec for the scheduler.
@@ -238,3 +241,42 @@ def parse_options(opts, sep='=', converter=str, name=""):
 def validate_filter(filter):
     """Validates that the filter is configured in the default filters."""
     return filter in CONF.scheduler_default_filters
+
+
+_SUPPORTS_AFFINITY = None
+_SUPPORTS_ANTI_AFFINITY = None
+
+
+def setup_instance_group(context, group_hint, user_group_hosts=None):
+    """Provides group_hosts and group_policies sets related to the group
+    provided by hint if corresponding filters are enabled.
+
+    :param group_hint: 'group' scheduler hint
+    :param user_group_hosts: Hosts from the group or empty set
+
+    :returns: None or tuple (group_hosts, group_policies)
+    """
+    global _SUPPORTS_AFFINITY
+    if _SUPPORTS_AFFINITY is None:
+        _SUPPORTS_AFFINITY = validate_filter(
+            'ServerGroupAffinityFilter')
+    global _SUPPORTS_ANTI_AFFINITY
+    if _SUPPORTS_ANTI_AFFINITY is None:
+        _SUPPORTS_ANTI_AFFINITY = validate_filter(
+            'ServerGroupAntiAffinityFilter')
+    if group_hint:
+        group = objects.InstanceGroup.get_by_hint(context, group_hint)
+        policies = set(('anti-affinity', 'affinity'))
+        if any((policy in policies) for policy in group.policies):
+            if ('affinity' in group.policies and not _SUPPORTS_AFFINITY):
+                msg = _("ServerGroupAffinityFilter not configured")
+                LOG.error(msg)
+                raise exception.NoValidHost(reason=msg)
+            if ('anti-affinity' in group.policies and
+                    not _SUPPORTS_ANTI_AFFINITY):
+                msg = _("ServerGroupAntiAffinityFilter not configured")
+                LOG.error(msg)
+                raise exception.NoValidHost(reason=msg)
+            group_hosts = set(group.get_hosts(context))
+            user_hosts = set(user_group_hosts) if user_group_hosts else set()
+            return (user_hosts | group_hosts, group.policies)
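The remaining hunks update the tests. The conductor task tests stub out setup_instance_group, since build_instances() now always calls it before select_destinations(). Note that because the support flags above are cached at module scope, any test that varies scheduler_default_filters must clear them first; the moved tests further below do this inline by assigning None to both globals. A hypothetical fixture-style helper (not part of this commit) would make the pattern explicit:

from nova.scheduler import utils as scheduler_utils

def _reset_group_filter_cache():
    # Force setup_instance_group() to re-run validate_filter() against the
    # current scheduler_default_filters setting.
    scheduler_utils._SUPPORTS_AFFINITY = None
    scheduler_utils._SUPPORTS_ANTI_AFFINITY = None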
@@ -1246,6 +1246,7 @@ class _BaseTaskTestCase(object):
         instance_properties = jsonutils.to_primitive(instances[0])

         self.mox.StubOutWithMock(db, 'flavor_extra_specs_get')
+        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
         self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                  'select_destinations')
         self.mox.StubOutWithMock(db, 'instance_get_by_uuid')
@@ -1257,6 +1258,7 @@ class _BaseTaskTestCase(object):
         db.flavor_extra_specs_get(
                 self.context,
                 instance_type['flavorid']).AndReturn('fake-specs')
+        scheduler_utils.setup_instance_group(self.context, None, None)
         self.conductor_manager.scheduler_client.select_destinations(
                 self.context, {'image': {'fake_data': 'should_pass_silently'},
                                'instance_properties': jsonutils.to_primitive(
@@ -1343,12 +1345,14 @@ class _BaseTaskTestCase(object):
                 'instance_properties': instances[0]}
         exception = exc.NoValidHost(reason='fake-reason')
         self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
         self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
         self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                  'select_destinations')

         scheduler_utils.build_request_spec(self.context, image,
                                            mox.IgnoreArg()).AndReturn(spec)
+        scheduler_utils.setup_instance_group(self.context, None, None)
         self.conductor_manager.scheduler_client.select_destinations(
             self.context, spec,
             {'retry': {'num_attempts': 1,
@@ -1995,6 +1999,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
         spec = {'fake': 'specs',
                 'instance_properties': instances[0]}
         self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
+        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
         self.mox.StubOutWithMock(scheduler_driver, 'handle_schedule_error')
         self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                  'select_destinations')
@@ -2003,6 +2008,7 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):

         scheduler_utils.build_request_spec(self.context, image,
                                            mox.IgnoreArg()).AndReturn(spec)
+        scheduler_utils.setup_instance_group(self.context, None, None)
         self.conductor_manager.scheduler_client.select_destinations(
             self.context, spec,
             {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
@@ -2040,8 +2046,10 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
                 block_device_mapping='block_device_mapping',
                 legacy_bdm=False)

+    @mock.patch.object(scheduler_utils, 'setup_instance_group')
     @mock.patch.object(scheduler_utils, 'build_request_spec')
-    def test_build_instances_info_cache_not_found(self, build_request_spec):
+    def test_build_instances_info_cache_not_found(self, build_request_spec,
+                                                  setup_instance_group):
         instances = [fake_instance.fake_instance_obj(self.context)
                 for i in xrange(2)]
         image = {'fake-data': 'should_pass_silently'}
@@ -2076,6 +2084,8 @@ class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
                 block_device_mapping='block_device_mapping',
                 legacy_bdm=False)

+        setup_instance_group.assert_called_once_with(
+            self.context, None, None)
         build_and_run_instance.assert_called_once_with(self.context,
                 instance=instances[1], host='host2', image={'fake-data':
                     'should_pass_silently'}, request_spec=spec,
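The scheduler-side tests move wholesale: the filter scheduler tests drop the group cases along with the imports only they used, and the scheduler utils tests (further below) pick them up, rewritten against the new module-level function.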
@@ -16,9 +16,6 @@
 Tests For Filter Scheduler.
 """

-import contextlib
-import uuid
-
 import mock
 import mox

@@ -27,13 +24,11 @@ from nova.compute import vm_states
 from nova import context
 from nova import db
-from nova import exception
-from nova import objects
 from nova.scheduler import driver
 from nova.scheduler import filter_scheduler
 from nova.scheduler import host_manager
 from nova.scheduler import utils as scheduler_utils
 from nova.scheduler import weights
 from nova.tests import fake_instance
 from nova.tests.scheduler import fakes
 from nova.tests.scheduler import test_scheduler

@@ -369,86 +364,6 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):

         self.assertEqual({'vcpus': 5}, host_state.limits)

-    def _create_server_group(self, policy='anti-affinity'):
-        instance = fake_instance.fake_instance_obj(self.context,
-                                                   params={'host': 'hostA'})
-
-        group = objects.InstanceGroup()
-        group.name = 'pele'
-        group.uuid = str(uuid.uuid4())
-        group.members = [instance.uuid]
-        group.policies = [policy]
-        return group
-
-    def _group_details_in_filter_properties(self, group, func='get_by_uuid',
-                                            hint=None, policy=None):
-        sched = fakes.FakeFilterScheduler()
-
-        filter_properties = {
-            'scheduler_hints': {
-                'group': hint,
-            },
-            'group_hosts': ['hostB'],
-        }
-
-        with contextlib.nested(
-            mock.patch.object(objects.InstanceGroup, func, return_value=group),
-            mock.patch.object(objects.InstanceGroup, 'get_hosts',
-                              return_value=['hostA']),
-        ) as (get_group, get_hosts):
-            sched._supports_anti_affinity = True
-            update_group_hosts = sched._setup_instance_group(self.context,
-                                                             filter_properties)
-            self.assertTrue(update_group_hosts)
-            self.assertEqual(set(['hostA', 'hostB']),
-                             filter_properties['group_hosts'])
-            self.assertEqual([policy], filter_properties['group_policies'])
-
-    def test_group_details_in_filter_properties(self):
-        for policy in ['affinity', 'anti-affinity']:
-            group = self._create_server_group(policy)
-            self._group_details_in_filter_properties(group, func='get_by_uuid',
-                                                     hint=group.uuid,
-                                                     policy=policy)
-
-    def _group_filter_with_filter_not_configured(self, policy):
-        self.flags(scheduler_default_filters=['f1', 'f2'])
-        sched = fakes.FakeFilterScheduler()
-
-        instance = fake_instance.fake_instance_obj(self.context,
-                                                   params={'host': 'hostA'})
-
-        group = objects.InstanceGroup()
-        group.uuid = str(uuid.uuid4())
-        group.members = [instance.uuid]
-        group.policies = [policy]
-
-        filter_properties = {
-            'scheduler_hints': {
-                'group': group.uuid,
-            },
-        }
-
-        with contextlib.nested(
-            mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
-                              return_value=group),
-            mock.patch.object(objects.InstanceGroup, 'get_hosts',
-                              return_value=['hostA']),
-        ) as (get_group, get_hosts):
-            self.assertRaises(exception.NoValidHost,
-                              sched._setup_instance_group, self.context,
-                              filter_properties)
-
-    def test_group_filter_with_filter_not_configured(self):
-        policies = ['anti-affinity', 'affinity']
-        for policy in policies:
-            self._group_filter_with_filter_not_configured(policy)
-
-    def test_group_name_details_in_filter_properties(self):
-        group = self._create_server_group()
-        self._group_details_in_filter_properties(group, 'get_by_name',
-                                                 group.name, 'anti-affinity')
-
     @mock.patch('nova.db.instance_extra_get_by_instance_uuid',
                 return_value={'numa_topology': None,
                               'pci_requests': None})
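In the scheduler utils tests, the moved cases call setup_instance_group() directly and assert on the returned (group_hosts, group_policies) tuple instead of inspecting mutated filter_properties. The pre-existing loop variable uuid is also renamed to _uuid so it no longer shadows the newly imported uuid module used by _create_server_group().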
@@ -15,6 +15,9 @@
 """
 Tests For Scheduler Utils
 """
+import contextlib
+import uuid
+
 import mock
 import mox
 from oslo.config import cfg
@@ -24,6 +27,7 @@ from nova.compute import utils as compute_utils
 from nova import db
 from nova import exception
 from nova import notifications
+from nova import objects
 from nova import rpc
 from nova.scheduler import utils as scheduler_utils
 from nova import test
@@ -86,9 +90,9 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
         old_ref = 'old_ref'
         new_ref = 'new_ref'

-        for uuid in expected_uuids:
+        for _uuid in expected_uuids:
             db.instance_update_and_get_original(
-                self.context, uuid, updates).AndReturn((old_ref, new_ref))
+                self.context, _uuid, updates).AndReturn((old_ref, new_ref))
             notifications.send_update(self.context, old_ref, new_ref,
                                       service=service)
             compute_utils.add_instance_fault_from_exc(
@@ -98,7 +102,7 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
             payload = dict(request_spec=request_spec,
                            instance_properties=request_spec.get(
                                'instance_properties', {}),
-                           instance_id=uuid,
+                           instance_id=_uuid,
                            state='fake-vm-state',
                            method=method,
                            reason=exc_info)
@@ -234,3 +238,77 @@ class SchedulerUtilsTestCase(test.NoDBTestCase):
         self.assertTrue(scheduler_utils.validate_filter('FakeFilter1'))
         self.assertTrue(scheduler_utils.validate_filter('FakeFilter2'))
         self.assertFalse(scheduler_utils.validate_filter('FakeFilter3'))
+
+    def _create_server_group(self, policy='anti-affinity'):
+        instance = fake_instance.fake_instance_obj(self.context,
+                                                   params={'host': 'hostA'})
+
+        group = objects.InstanceGroup()
+        group.name = 'pele'
+        group.uuid = str(uuid.uuid4())
+        group.members = [instance.uuid]
+        group.policies = [policy]
+        return group
+
+    def _group_details_in_filter_properties(self, group, func='get_by_uuid',
+                                            hint=None, policy=None):
+        group_hint = hint
+        group_hosts = ['hostB']
+
+        with contextlib.nested(
+            mock.patch.object(objects.InstanceGroup, func, return_value=group),
+            mock.patch.object(objects.InstanceGroup, 'get_hosts',
+                              return_value=['hostA']),
+        ) as (get_group, get_hosts):
+            scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+            scheduler_utils._SUPPORTS_AFFINITY = None
+            group_info = scheduler_utils.setup_instance_group(
+                self.context, group_hint, group_hosts)
+            self.assertEqual(
+                (set(['hostA', 'hostB']), [policy]),
+                group_info)
+
+    def test_group_details_in_filter_properties(self):
+        for policy in ['affinity', 'anti-affinity']:
+            group = self._create_server_group(policy)
+            self._group_details_in_filter_properties(group, func='get_by_uuid',
+                                                     hint=group.uuid,
+                                                     policy=policy)
+
+    def _group_filter_with_filter_not_configured(self, policy):
+        self.flags(scheduler_default_filters=['f1', 'f2'])
+
+        instance = fake_instance.fake_instance_obj(self.context,
+                                                   params={'host': 'hostA'})
+
+        group = objects.InstanceGroup()
+        group.uuid = str(uuid.uuid4())
+        group.members = [instance.uuid]
+        group.policies = [policy]
+
+        with contextlib.nested(
+            mock.patch.object(objects.InstanceGroup, 'get_by_uuid',
+                              return_value=group),
+            mock.patch.object(objects.InstanceGroup, 'get_hosts',
+                              return_value=['hostA']),
+        ) as (get_group, get_hosts):
+            scheduler_utils._SUPPORTS_ANTI_AFFINITY = None
+            scheduler_utils._SUPPORTS_AFFINITY = None
+            self.assertRaises(exception.NoValidHost,
+                              scheduler_utils.setup_instance_group,
+                              self.context, group.uuid)
+
+    def test_group_filter_with_filter_not_configured(self):
+        policies = ['anti-affinity', 'affinity']
+        for policy in policies:
+            self._group_filter_with_filter_not_configured(policy)
+
+    def test_group_uuid_details_in_filter_properties(self):
+        group = self._create_server_group()
+        self._group_details_in_filter_properties(group, 'get_by_uuid',
+                                                 group.uuid, 'anti-affinity')
+
+    def test_group_name_details_in_filter_properties(self):
+        group = self._create_server_group()
+        self._group_details_in_filter_properties(group, 'get_by_name',
+                                                 group.name, 'anti-affinity')