Merge "Add auto-reloading JSON config file support to scheduler."

Jenkins 2011-11-08 14:40:32 +00:00 committed by Gerrit Code Review
commit 2d434e1389
11 changed files with 350 additions and 152 deletions

View File

@@ -25,21 +25,21 @@ import types

 import M2Crypto
+from nova.compute import api as compute_api
 from novaclient import v1_1 as novaclient
 from novaclient import exceptions as novaclient_exceptions
 from nova import crypto
 from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import rpc
-from nova.compute import api as compute_api
 from nova.scheduler import api
 from nova.scheduler import driver
 from nova.scheduler import filters
 from nova.scheduler import least_cost
+from nova.scheduler import scheduler_options
+from nova import utils

 FLAGS = flags.FLAGS

@@ -59,6 +59,10 @@ class DistributedScheduler(driver.Scheduler):
     """Scheduler that can work across any nova deployment, from simple
     deployments to multiple nested zones.
     """

+    def __init__(self, *args, **kwargs):
+        super(DistributedScheduler, self).__init__(*args, **kwargs)
+        self.cost_function_cache = {}
+        self.options = scheduler_options.SchedulerOptions()
+
     def schedule(self, context, topic, method, *args, **kwargs):
         """The schedule() contract requires we return the one

@@ -243,6 +247,10 @@ class DistributedScheduler(driver.Scheduler):
         """Broken out for testing."""
         return db.zone_get_all(context)

+    def _get_configuration_options(self):
+        """Fetch options dictionary. Broken out for testing."""
+        return self.options.get_configuration()
+
     def _schedule(self, elevated, topic, request_spec, *args, **kwargs):
         """Returns a list of hosts that meet the required specs,
         ordered by their fitness.

@@ -257,9 +265,13 @@ class DistributedScheduler(driver.Scheduler):
                     "provisioning.")
             raise NotImplementedError(msg)

+        cost_functions = self.get_cost_functions()
+
         ram_requirement_mb = instance_type['memory_mb']
         disk_requirement_bg = instance_type['local_gb']

+        options = self._get_configuration_options()
+
         # Find our local list of acceptable hosts by repeatedly
         # filtering and weighing our options. Each time we choose a
         # host, we virtually consume resources on it so subsequent

@@ -274,7 +286,7 @@ class DistributedScheduler(driver.Scheduler):
         for num in xrange(num_instances):
             # Filter local hosts based on requirements ...
             filtered_hosts = self._filter_hosts(topic, request_spec,
-                    unfiltered_hosts)
+                    unfiltered_hosts, options)

             if not filtered_hosts:
                 # Can't get any more locally.

@@ -284,8 +296,8 @@ class DistributedScheduler(driver.Scheduler):
             # weighted_host = WeightedHost() ... the best
             # host for the job.
-            weighted_host = least_cost.weigh_hosts(request_spec,
-                    filtered_hosts)
+            weighted_host = least_cost.weighted_sum(cost_functions,
+                    filtered_hosts, options)
             LOG.debug(_("Weighted %(weighted_host)s") % locals())
             selected_hosts.append(weighted_host)

@@ -343,7 +355,7 @@ class DistributedScheduler(driver.Scheduler):
                 raise exception.SchedulerHostFilterNotFound(filter_name=msg)
         return good_filters

-    def _filter_hosts(self, topic, request_spec, hosts=None):
+    def _filter_hosts(self, topic, request_spec, hosts, options):
         """Filter the full host list. hosts = [(host, HostInfo()), ...].
         This method returns a subset of hosts, in the same format."""
         selected_filters = self._choose_host_filters()

@@ -358,6 +370,48 @@ class DistributedScheduler(driver.Scheduler):
         for selected_filter in selected_filters:
             query = selected_filter.instance_type_to_filter(instance_type)
-            hosts = selected_filter.filter_hosts(hosts, query)
+            hosts = selected_filter.filter_hosts(hosts, query, options)
         return hosts

+    def get_cost_functions(self, topic=None):
+        """Returns a list of tuples containing weights and cost functions to
+        use for weighing hosts
+        """
+        if topic is None:
+            # Schedulers only support compute right now.
+            topic = "compute"
+        if topic in self.cost_function_cache:
+            return self.cost_function_cache[topic]
+
+        cost_fns = []
+        for cost_fn_str in FLAGS.least_cost_functions:
+            if '.' in cost_fn_str:
+                short_name = cost_fn_str.split('.')[-1]
+            else:
+                short_name = cost_fn_str
+                cost_fn_str = "%s.%s.%s" % (
+                        __name__, self.__class__.__name__, short_name)
+            if not (short_name.startswith('%s_' % topic) or
+                    short_name.startswith('noop')):
+                continue
+
+            try:
+                # NOTE: import_class is somewhat misnamed since
+                # the weighing function can be any non-class callable
+                # (i.e., no 'self')
+                cost_fn = utils.import_class(cost_fn_str)
+            except exception.ClassNotFound:
+                raise exception.SchedulerCostFunctionNotFound(
+                        cost_fn_str=cost_fn_str)
+
+            try:
+                flag_name = "%s_weight" % cost_fn.__name__
+                weight = getattr(FLAGS, flag_name)
+            except AttributeError:
+                raise exception.SchedulerWeightFlagNotFound(
+                        flag_name=flag_name)
+            cost_fns.append((weight, cost_fn))
+
+        self.cost_function_cache[topic] = cost_fns
+        return cost_fns
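
For readers who want to see the flag-to-function plumbing above in isolation, here is a minimal standalone sketch of the same name-resolution idea. The 'compute_'/'noop' prefix rule and the '<function>_weight' flag convention come from the hunk above; the FakeFlags container, the registry dict, and the sample cost functions are illustrative stand-ins rather than nova code (nova resolves the dotted names with utils.import_class instead).

def noop_cost_fn(host_info, options=None):
    return 1


def compute_fill_first_cost_fn(host_info, options=None):
    return host_info['free_ram_mb']


class FakeFlags(object):
    # Stand-in for FLAGS; only the naming conventions mirror the patch.
    least_cost_functions = ['compute_fill_first_cost_fn']
    compute_fill_first_cost_fn_weight = 1.0


def get_cost_functions(flags, registry, topic='compute'):
    """Pair each configured cost function with its <name>_weight flag."""
    cost_fns = []
    for name in flags.least_cost_functions:
        short_name = name.split('.')[-1]
        if not (short_name.startswith('%s_' % topic) or
                short_name.startswith('noop')):
            continue
        weight = getattr(flags, '%s_weight' % short_name)
        cost_fns.append((weight, registry[short_name]))
    return cost_fns


registry = {'compute_fill_first_cost_fn': compute_fill_first_cost_fn,
            'noop_cost_fn': noop_cost_fn}
print(get_cost_functions(FakeFlags(), registry))
# -> [(1.0, <function compute_fill_first_cost_fn at 0x...>)]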

View File

@@ -23,7 +23,7 @@ class AbstractHostFilter(object):
         """Convert instance_type into a filter for most common use-case."""
         raise NotImplementedError()

-    def filter_hosts(self, host_list, query):
+    def filter_hosts(self, host_list, query, options):
         """Return a list of hosts that fulfill the filter."""
         raise NotImplementedError()

View File

@@ -26,6 +26,6 @@ class AllHostsFilter(abstract_filter.AbstractHostFilter):
         """
         return instance_type

-    def filter_hosts(self, host_list, query):
+    def filter_hosts(self, host_list, query, options):
         """Return the entire list of supplied hosts."""
         return list(host_list)

View File

@@ -51,7 +51,7 @@ class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
         free_ram_mb = host_info.free_ram_mb
         return free_ram_mb >= requested_ram

-    def filter_hosts(self, host_list, query):
+    def filter_hosts(self, host_list, query, options):
         """Return a list of hosts that can create instance_type."""
         instance_type = query
         selected_hosts = []

View File

@@ -134,7 +134,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
         result = method(self, cooked_args)
         return result

-    def filter_hosts(self, host_list, query):
+    def filter_hosts(self, host_list, query, options):
         """Return a list of hosts that can fulfill the requirements
         specified in the query.
         """

View File

@@ -23,11 +23,8 @@ is then selected for provisioning.
 """

-import collections
-
 from nova import flags
 from nova import log as logging
-from nova import utils
 from nova import exception

 LOG = logging.getLogger('nova.scheduler.least_cost')

@@ -46,9 +43,6 @@ flags.DEFINE_float('compute_fill_first_cost_fn_weight', 1.0,
                    'How much weight to give the fill-first cost function')

-COST_FUNCTION_CACHE = {}
-

 class WeightedHost(object):
     """Reduced set of information about a host that has been weighed.
     This is an attempt to remove some of the ad-hoc dict structures

@@ -74,36 +68,18 @@ class WeightedHost(object):
         return x


-def noop_cost_fn(host_info):
+def noop_cost_fn(host_info, options=None):
     """Return a pre-weight cost of 1 for each host"""
     return 1


-def compute_fill_first_cost_fn(host_info):
+def compute_fill_first_cost_fn(host_info, options=None):
     """More free ram = higher weight. So servers will less free
     ram will be preferred."""
     return host_info.free_ram_mb


-def normalize_grid(grid):
-    """Normalize a grid of numbers by row."""
-    if not grid:
-        return [[]]
-    normalized = []
-    for row in grid:
-        if not row:
-            normalized.append([])
-            continue
-        mx = float(max(row))
-        if abs(mx) < 0.001:
-            normalized = [0.0] * len(row)
-            continue
-        normalized.append([float(col) / mx for col in row])
-    return normalized
-
-
-def weighted_sum(host_list, weighted_fns):
+def weighted_sum(weighted_fns, host_list, options):
     """Use the weighted-sum method to compute a score for an array of objects.
     Normalize the results of the objective-functions so that the weights are
     meaningful regardless of objective-function's range.

@@ -111,6 +87,7 @@ def weighted_sum(host_list, weighted_fns):
        host_list - [(host, HostInfo()), ...]
        weighted_fns - list of weights and functions like:
            [(weight, objective-functions), ...]
+       options is an arbitrary dict of values.

     Returns a single WeightedHost object which represents the best
     candidate.

@@ -120,8 +97,8 @@ def weighted_sum(host_list, weighted_fns):
     # One row per host. One column per function.
     scores = []
     for weight, fn in weighted_fns:
-        scores.append([fn(host_info) for hostname, host_info in host_list])
-    scores = normalize_grid(scores)
+        scores.append([fn(host_info, options) for hostname, host_info
+                       in host_list])

     # Adjust the weights in the grid by the functions weight adjustment
     # and sum them up to get a final list of weights.

@@ -143,54 +120,3 @@ def weighted_sum(host_list, weighted_fns):
     final_scores = sorted(final_scores)
     weight, (host, hostinfo) = final_scores[0]  # Lowest score is the winner!
     return WeightedHost(weight, host=host, hostinfo=hostinfo)
-
-
-def get_cost_fns(topic=None):
-    """Returns a list of tuples containing weights and cost functions to
-    use for weighing hosts
-    """
-    global COST_FUNCTION_CACHE
-    cost_function_cache = COST_FUNCTION_CACHE
-
-    if topic is None:
-        # Schedulers only support compute right now.
-        topic = "compute"
-    if topic in cost_function_cache:
-        return cost_function_cache[topic]
-
-    cost_fns = []
-    for cost_fn_str in FLAGS.least_cost_functions:
-        if '.' in cost_fn_str:
-            short_name = cost_fn_str.split('.')[-1]
-        else:
-            short_name = cost_fn_str
-            cost_fn_str = "%s.%s.%s" % (
-                    __name__, self.__class__.__name__, short_name)
-        if not (short_name.startswith('%s_' % topic) or
-                short_name.startswith('noop')):
-            continue
-
-        try:
-            # NOTE(sirp): import_class is somewhat misnamed since it can
-            # any callable from a module
-            cost_fn = utils.import_class(cost_fn_str)
-        except exception.ClassNotFound:
-            raise exception.SchedulerCostFunctionNotFound(
-                    cost_fn_str=cost_fn_str)
-
-        try:
-            flag_name = "%s_weight" % cost_fn.__name__
-            weight = getattr(FLAGS, flag_name)
-        except AttributeError:
-            raise exception.SchedulerWeightFlagNotFound(
-                    flag_name=flag_name)
-        cost_fns.append((weight, cost_fn))
-
-    cost_function_cache[topic] = cost_fns
-    return cost_fns
-
-
-def weigh_hosts(request_spec, host_list):
-    """Returns the best host as a WeightedHost."""
-    cost_fns = get_cost_fns()
-    return weighted_sum(host_list, cost_fns)
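
The reworked weighted_sum() now takes the (weight, function) tuples first and threads the options dict into every cost-function call; the lowest combined score still wins. The sketch below is an independent, self-contained illustration of that scoring scheme: the host data, the dict-based HostInfo stand-in, and the option values are invented, and only the input shapes and the lowest-score-wins rule follow the patch.

def noop_cost_fn(host_info, options=None):
    return 1


def ram_cost_fn(host_info, options=None):
    # More free RAM means a higher score, so emptier hosts lose (fill-first).
    return host_info['free_ram_mb']


def weighted_sum(weighted_fns, host_list, options):
    # One row of scores per cost function, one column per host.
    scores = [[fn(info, options) for _name, info in host_list]
              for _weight, fn in weighted_fns]
    weights = [weight for weight, _fn in weighted_fns]
    totals = []
    for idx, (name, _info) in enumerate(host_list):
        total = sum(w * row[idx] for w, row in zip(weights, scores))
        totals.append((total, name))
    return min(totals)  # lowest combined score wins


host_list = [('host1', {'free_ram_mb': 0}),
             ('host2', {'free_ram_mb': 1536})]
fns = [(1.0, noop_cost_fn), (1.0, ram_cost_fn)]
print(weighted_sum(fns, host_list, options={}))  # -> (1.0, 'host1')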

View File

@@ -0,0 +1,98 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+SchedulerOptions monitors a local .json file for changes and loads
+it if needed. This file is converted to a data structure and passed
+into the filtering and weighing functions which can use it for
+dynamic configuration.
+"""
+
+import datetime
+import json
+import os
+
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('scheduler_json_config_location',
+                    '',
+                    'Absolute path to scheduler configuration JSON file.')
+
+LOG = logging.getLogger('nova.scheduler.scheduler_options')
+
+
+class SchedulerOptions(object):
+    """
+    SchedulerOptions monitors a local .json file for changes and loads it
+    if needed. This file is converted to a data structure and passed into
+    the filtering and weighing functions which can use it for dynamic
+    configuration.
+    """
+
+    def __init__(self):
+        super(SchedulerOptions, self).__init__()
+        self.data = {}
+        self.last_modified = None
+        self.last_checked = None
+
+    def _get_file_handle(self, filename):
+        """Get file handle. Broken out for testing."""
+        return open(filename)
+
+    def _get_file_timestamp(self, filename):
+        """Get the last modified datetime. Broken out for testing."""
+        try:
+            return os.path.getmtime(filename)
+        except os.error, e:
+            LOG.exception(_("Could not stat scheduler options file "
+                            "%(filename)s: '%(e)s'", locals()))
+            raise
+
+    def _load_file(self, handle):
+        """Decode the JSON file. Broken out for testing."""
+        try:
+            return json.load(handle)
+        except ValueError, e:
+            LOG.exception(_("Could not decode scheduler options: "
+                            "'%(e)s'") % locals())
+            return {}
+
+    def _get_time_now(self):
+        """Get current UTC. Broken out for testing."""
+        return datetime.datetime.utcnow()
+
+    def get_configuration(self, filename=None):
+        """Check the json file for changes and load it if needed."""
+        if not filename:
+            filename = FLAGS.scheduler_json_config_location
+        if not filename:
+            return self.data
+        if self.last_checked:
+            now = self._get_time_now()
+            if now - self.last_checked < datetime.timedelta(minutes=5):
+                return self.data
+
+        last_modified = self._get_file_timestamp(filename)
+        if not last_modified or not self.last_modified or \
+                last_modified > self.last_modified:
+            self.data = self._load_file(self._get_file_handle(filename))
+            self.last_modified = last_modified
+        if not self.data:
+            self.data = {}
+
+        return self.data
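
The reload policy implemented by get_configuration() above (skip the stat entirely if the last check was under five minutes ago, and re-parse only when the file's mtime moves forward) can be summarized in a small standalone sketch. This is a simplified illustration under those assumptions, not the nova class: the class name, the example file path in the comment, and the default interval constant are invented.

import datetime
import json
import os


class ReloadingJsonConfig(object):
    """Toy version of the reload policy: re-read the JSON file only when
    at least five minutes have passed since the last check and the file's
    mtime is newer than the cached one."""

    CHECK_INTERVAL = datetime.timedelta(minutes=5)

    def __init__(self, filename):
        self.filename = filename
        self.data = {}
        self.last_checked = None
        self.last_modified = None

    def get(self):
        now = datetime.datetime.utcnow()
        if self.last_checked and now - self.last_checked < self.CHECK_INTERVAL:
            return self.data           # checked recently, serve the cache
        self.last_checked = now
        mtime = os.path.getmtime(self.filename)
        if self.last_modified is None or mtime > self.last_modified:
            with open(self.filename) as handle:
                self.data = json.load(handle) or {}
            self.last_modified = mtime
        return self.data


# Usage (hypothetical path): the scheduler would hand the resulting dict
# to its filters and cost functions as the 'options' argument.
# config = ReloadingJsonConfig('/etc/nova/scheduler_opts.json')
# options = config.get()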

View File

@@ -212,10 +212,11 @@ class DistributedSchedulerTestCase(test.TestCase):
         self.next_weight = 1.0

-        def _fake_filter_hosts(topic, request_info, unfiltered_hosts):
+        def _fake_filter_hosts(topic, request_info, unfiltered_hosts,
+                               options):
             return unfiltered_hosts

-        def _fake_weigh_hosts(request_info, hosts):
+        def _fake_weighted_sum(functions, hosts, options):
             self.next_weight += 2.0
             host, hostinfo = hosts[0]
             return least_cost.WeightedHost(self.next_weight, host=host,

@@ -225,7 +226,7 @@ class DistributedSchedulerTestCase(test.TestCase):
         fake_context = context.RequestContext('user', 'project')
         sched.zone_manager = ds_fakes.FakeZoneManager()
         self.stubs.Set(sched, '_filter_hosts', _fake_filter_hosts)
-        self.stubs.Set(least_cost, 'weigh_hosts', _fake_weigh_hosts)
+        self.stubs.Set(least_cost, 'weighted_sum', _fake_weighted_sum)
         self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
         self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)

@@ -260,3 +261,12 @@ class DistributedSchedulerTestCase(test.TestCase):
         self.assertTrue(isinstance(weighted_host, least_cost.WeightedHost))
         self.assertEqual(weighted_host.to_dict(), dict(weight=1, host='x',
                                                        blob='y', zone='z'))
+
+    def test_get_cost_functions(self):
+        fixture = ds_fakes.FakeDistributedScheduler()
+        fns = fixture.get_cost_functions()
+        self.assertEquals(len(fns), 1)
+        weight, fn = fns[0]
+        self.assertEquals(weight, 1.0)
+        hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
+        self.assertEquals(1000, fn(hostinfo))

View File

@@ -122,7 +122,7 @@ class HostFilterTestCase(test.TestCase):
         hf = hfs[0]
         all_hosts = self._get_all_hosts()
         cooked = hf.instance_type_to_filter(self.instance_type)
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})
         self.assertEquals(4, len(hosts))
         for host, capabilities in hosts:
             self.assertTrue(host.startswith('host'))

@@ -132,7 +132,7 @@ class HostFilterTestCase(test.TestCase):
         # filter all hosts that can support 30 ram and 300 disk
         cooked = hf.instance_type_to_filter(self.instance_type)
         all_hosts = self._get_all_hosts()
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})
         self.assertEquals(3, len(hosts))
         just_hosts = [host for host, hostinfo in hosts]
         just_hosts.sort()

@@ -147,7 +147,7 @@ class HostFilterTestCase(test.TestCase):
         # reserving 2048 ram
         cooked = hf.instance_type_to_filter(self.instance_type)
         all_hosts = self._get_all_hosts()
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})
         self.assertEquals(2, len(hosts))
         just_hosts = [host for host, hostinfo in hosts]
         just_hosts.sort()

@@ -159,7 +159,7 @@ class HostFilterTestCase(test.TestCase):
         # filter all hosts that can support 30 ram and 300 disk
         cooked = hf.instance_type_to_filter(self.gpu_instance_type)
         all_hosts = self._get_all_hosts()
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})
         self.assertEquals(1, len(hosts))
         just_hosts = [host for host, caps in hosts]
         self.assertEquals('host4', just_hosts[0])

@@ -169,7 +169,7 @@ class HostFilterTestCase(test.TestCase):
         # filter all hosts that can support 30 ram and 300 disk
         cooked = hf.instance_type_to_filter(self.instance_type)
         all_hosts = self._get_all_hosts()
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})
         self.assertEquals(2, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()

@@ -189,7 +189,7 @@ class HostFilterTestCase(test.TestCase):
             ]
         ]
         cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})

         self.assertEquals(3, len(hosts))
         just_hosts = [host for host, caps in hosts]

@@ -201,7 +201,7 @@ class HostFilterTestCase(test.TestCase):
             ['=', '$compute.host_memory_free', 30],
         ]
         cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})

         self.assertEquals(3, len(hosts))
         just_hosts = [host for host, caps in hosts]

@@ -211,7 +211,7 @@ class HostFilterTestCase(test.TestCase):
         raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
         cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(all_hosts, cooked)
+        hosts = hf.filter_hosts(all_hosts, cooked, {})

         self.assertEquals(2, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()

@@ -222,32 +222,32 @@ class HostFilterTestCase(test.TestCase):
         raw = ['unknown command', ]
         cooked = json.dumps(raw)
         try:
-            hf.filter_hosts(all_hosts, cooked)
+            hf.filter_hosts(all_hosts, cooked, {})
             self.fail("Should give KeyError")
         except KeyError, e:
             pass

-        self.assertTrue(hf.filter_hosts(all_hosts, json.dumps([])))
-        self.assertTrue(hf.filter_hosts(all_hosts, json.dumps({})))
+        self.assertTrue(hf.filter_hosts(all_hosts, json.dumps([]), {}))
+        self.assertTrue(hf.filter_hosts(all_hosts, json.dumps({}), {}))
         self.assertTrue(hf.filter_hosts(all_hosts, json.dumps(
             ['not', True, False, True, False],
-        )))
+        ), {}))

         try:
             hf.filter_hosts(all_hosts, json.dumps(
-                'not', True, False, True, False,
-            ))
+                'not', True, False, True, False,), {})
             self.fail("Should give KeyError")
         except KeyError, e:
             pass

         self.assertFalse(hf.filter_hosts(all_hosts,
-                json.dumps(['=', '$foo', 100])))
+                json.dumps(['=', '$foo', 100]), {}))
         self.assertFalse(hf.filter_hosts(all_hosts,
-                json.dumps(['=', '$.....', 100])))
+                json.dumps(['=', '$.....', 100]), {}))
         self.assertFalse(hf.filter_hosts(all_hosts,
                 json.dumps(
-            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
+            ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]),
+            {}))
         self.assertFalse(hf.filter_hosts(all_hosts,
-                json.dumps(['=', {}, ['>', '$missing....foo']])))
+                json.dumps(['=', {}, ['>', '$missing....foo']]), {}))
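
The queries fed to JsonFilter in these tests are JSON-encoded prefix-notation expressions: the first element names an operator, the rest are operands, and '$'-prefixed strings refer to host capabilities. The toy evaluator below only illustrates that query shape; it is not nova's JsonFilter implementation, and its operator set and '$' handling are simplified assumptions made for this sketch.

import operator

OPS = {'=': operator.eq,
       '<': operator.lt,
       '>': operator.gt,
       '<=': operator.le,
       '>=': operator.ge,
       'in': lambda value, *choices: value in choices,
       'or': lambda *args: any(args),
       'and': lambda *args: all(args),
       'not': lambda *args: not any(args)}


def evaluate(node, capabilities):
    """Evaluate a prefix-notation query against a capabilities dict."""
    if not isinstance(node, list):
        if isinstance(node, str) and node.startswith('$'):
            # '$compute.host_memory_free' -> capabilities['host_memory_free']
            return capabilities.get(node.split('.')[-1])
        return node
    op = node[0]
    args = [evaluate(arg, capabilities) for arg in node[1:]]
    return OPS[op](*args)


caps = {'host_memory_free': 40}
print(evaluate(['in', '$compute.host_memory_free', 20, 40, 60], caps))  # True
print(evaluate(['>=', '$compute.host_memory_free', 100], caps))         # False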

View File

@@ -21,11 +21,11 @@ from nova import test
 from nova.tests.scheduler import fake_zone_manager


-def offset(hostinfo):
+def offset(hostinfo, options):
     return hostinfo.free_ram_mb + 10000


-def scale(hostinfo):
+def scale(hostinfo, options):
     return hostinfo.free_ram_mb * 2

@@ -39,23 +39,6 @@ class LeastCostTestCase(test.TestCase):
     def tearDown(self):
         super(LeastCostTestCase, self).tearDown()

-    def test_normalize_grid(self):
-        raw = [
-            [1, 2, 3, 4, 5],
-            [10, 20, 30, 40, 50],
-            [100, 200, 300, 400, 500],
-        ]
-        expected = [
-            [.2, .4, .6, .8, 1.0],
-            [.2, .4, .6, .8, 1.0],
-            [.2, .4, .6, .8, 1.0],
-        ]
-        self.assertEquals(expected, least_cost.normalize_grid(raw))
-        self.assertEquals([[]], least_cost.normalize_grid([]))
-        self.assertEquals([[]], least_cost.normalize_grid([[]]))
-
     def test_weighted_sum_happy_day(self):
         fn_tuples = [(1.0, offset), (1.0, scale)]
         hostinfo_list = self.zone_manager.get_all_host_data(None).items()

@@ -69,16 +52,14 @@ class LeastCostTestCase(test.TestCase):
         # [10000, 11536, 13072, 18192]
         # [0, 768, 1536, 4096]

-        # normalized =
-        # [ 0.55, 0.63, 0.72, 1.0]
-        # [ 0.0, 0.19, 0.38, 1.0]
-
         # adjusted [ 1.0 * x + 1.0 * y] =
-        # [0.55, 0.82, 1.1, 2.0]
+        # [10000, 12304, 14608, 22288]

         # so, host1 should win:
-        weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples)
-        self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01)
+        options = {}
+        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
+                                                options)
+        self.assertEqual(weighted_host.weight, 10000)
         self.assertEqual(weighted_host.host, 'host1')

     def test_weighted_sum_single_function(self):

@@ -93,18 +74,9 @@ class LeastCostTestCase(test.TestCase):
         # [offset, ]=
         # [10000, 11536, 13072, 18192]

-        # normalized =
-        # [ 0.55, 0.63, 0.72, 1.0]
-
         # so, host1 should win:
-        weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples)
-        self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01)
+        options = {}
+        weighted_host = least_cost.weighted_sum(fn_tuples, hostinfo_list,
+                                                options)
+        self.assertEqual(weighted_host.weight, 10000)
         self.assertEqual(weighted_host.host, 'host1')
-
-    def test_get_cost_functions(self):
-        fns = least_cost.get_cost_fns()
-        self.assertEquals(len(fns), 1)
-        weight, fn = fns[0]
-        self.assertEquals(weight, 1.0)
-        hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
-        self.assertEquals(1000, fn(hostinfo))
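
The comment blocks in the rewritten happy-day test spell out the expected raw scores; since the normalization step is gone, the new expected weight of 10000 is just the 1.0-weighted sum of the two score rows, with the lowest column winning. A quick standalone check of that arithmetic:

offset_row = [10000, 11536, 13072, 18192]   # offset() score per host
scale_row = [0, 768, 1536, 4096]            # scale() score per host
totals = [1.0 * o + 1.0 * s for o, s in zip(offset_row, scale_row)]
print(totals)       # -> [10000.0, 12304.0, 14608.0, 22288.0]
print(min(totals))  # -> 10000.0, so the first host (host1) wins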

View File

@@ -0,0 +1,138 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For PickledScheduler.
+"""
+
+import datetime
+import json
+import StringIO
+
+from nova.scheduler import scheduler_options
+from nova import test
+
+
+class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
+    def __init__(self, last_checked, now, file_old, file_now, data, filedata):
+        super(FakeSchedulerOptions, self).__init__()
+        # Change internals ...
+        self.last_modified = file_old
+        self.last_checked = last_checked
+        self.data = data
+
+        # For overrides ...
+        self._time_now = now
+        self._file_now = file_now
+        self._file_data = filedata
+
+        self.file_was_loaded = False
+
+    def _get_file_timestamp(self, filename):
+        return self._file_now
+
+    def _get_file_handle(self, filename):
+        self.file_was_loaded = True
+        return StringIO.StringIO(self._file_data)
+
+    def _get_time_now(self):
+        return self._time_now
+
+
+class SchedulerOptionsTestCase(test.TestCase):
+    def test_get_configuration_first_time_no_flag(self):
+        last_checked = None
+        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_old = None
+        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+        data = dict(a=1, b=2, c=3)
+        jdata = json.dumps(data)
+
+        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+                                    {}, jdata)
+        self.assertEquals({}, fake.get_configuration())
+        self.assertFalse(fake.file_was_loaded)
+
+    def test_get_configuration_first_time_empty_file(self):
+        last_checked = None
+        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_old = None
+        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+        data = dict(a=1, b=2, c=3)
+        jdata = ""
+
+        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+                                    {}, jdata)
+        self.assertEquals({}, fake.get_configuration('foo.json'))
+        self.assertTrue(fake.file_was_loaded)
+
+    def test_get_configuration_first_time_happy_day(self):
+        last_checked = None
+        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_old = None
+        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+        data = dict(a=1, b=2, c=3)
+        jdata = json.dumps(data)
+
+        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+                                    {}, jdata)
+        self.assertEquals(data, fake.get_configuration('foo.json'))
+        self.assertTrue(fake.file_was_loaded)
+
+    def test_get_configuration_second_time_no_change(self):
+        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+
+        data = dict(a=1, b=2, c=3)
+        jdata = json.dumps(data)
+
+        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+                                    data, jdata)
+        self.assertEquals(data, fake.get_configuration('foo.json'))
+        self.assertFalse(fake.file_was_loaded)
+
+    def test_get_configuration_second_time_too_fast(self):
+        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+        now = datetime.datetime(2011, 1, 1, 1, 1, 2)
+        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+        old_data = dict(a=1, b=2, c=3)
+        data = dict(a=11, b=12, c=13)
+        jdata = json.dumps(data)
+
+        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+                                    old_data, jdata)
+        self.assertEquals(old_data, fake.get_configuration('foo.json'))
+        self.assertFalse(fake.file_was_loaded)
+
+    def test_get_configuration_second_time_change(self):
+        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
+        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
+        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
+
+        old_data = dict(a=1, b=2, c=3)
+        data = dict(a=11, b=12, c=13)
+        jdata = json.dumps(data)
+
+        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
+                                    old_data, jdata)
+        self.assertEquals(data, fake.get_configuration('foo.json'))
+        self.assertTrue(fake.file_was_loaded)
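
FakeSchedulerOptions works by overriding the three small hooks that SchedulerOptions deliberately breaks out (file timestamp, file handle, current time), so the reload logic can be exercised without touching the real clock or filesystem. Below is a minimal standalone sketch of that seam-based testing pattern; the class and method names are invented for illustration and are not nova code.

import datetime


class Clock(object):
    def _get_time_now(self):
        """Broken out so tests can substitute a fixed time."""
        return datetime.datetime.utcnow()

    def is_stale(self, last_checked, interval):
        return self._get_time_now() - last_checked >= interval


class FrozenClock(Clock):
    def __init__(self, now):
        self.now = now

    def _get_time_now(self):
        return self.now


frozen = FrozenClock(datetime.datetime(2012, 1, 1))
print(frozen.is_stale(datetime.datetime(2011, 1, 1),
                      datetime.timedelta(minutes=5)))  # -> True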