got tests passing with logic changes
@@ -45,20 +45,19 @@ LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
 class InvalidBlob(exception.NovaException):
     message = _("Ill-formed or incorrectly routed 'blob' data sent "
-            "to instance create request.")
+                "to instance create request.")


 class AbstractScheduler(driver.Scheduler):
     """Base class for creating Schedulers that can work across any nova
     deployment, from simple designs to multiply-nested zones.
     """

     def _call_zone_method(self, context, method, specs, zones):
         """Call novaclient zone method. Broken out for testing."""
         return api.call_zone_method(context, method, specs=specs, zones=zones)

     def _provision_resource_locally(self, context, build_plan_item,
-            request_spec, kwargs):
+                                    request_spec, kwargs):
         """Create the requested resource in this Zone."""
         host = build_plan_item['hostname']
         base_options = request_spec['instance_properties']
@@ -68,21 +67,21 @@ class AbstractScheduler(driver.Scheduler):
         # support at some point? Also, OS API has no concept of security
         # groups.
         instance = compute_api.API().create_db_entry_for_new_instance(context,
-                image, base_options, None, [])
+                        image, base_options, None, [])

         instance_id = instance['id']
         kwargs['instance_id'] = instance_id

-        rpc.cast(context,
-                 db.queue_get_for(context, "compute", host),
-                 {"method": "run_instance",
-                  "args": kwargs})
+        queue = db.queue_get_for(context, "compute", host)
+        params = {"method": "run_instance", "args": kwargs}
+        rpc.cast(context, queue, params)
         LOG.debug(_("Provisioning locally via compute node %(host)s")
-                % locals())
+                  % locals())

     def _decrypt_blob(self, blob):
         """Returns the decrypted blob or None if invalid. Broken out
-        for testing."""
+        for testing.
+        """
         decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
         try:
             json_entry = decryptor(blob)
@@ -92,15 +91,15 @@ class AbstractScheduler(driver.Scheduler):
             return None

     def _ask_child_zone_to_create_instance(self, context, zone_info,
-            request_spec, kwargs):
+                                           request_spec, kwargs):
         """Once we have determined that the request should go to one
         of our children, we need to fabricate a new POST /servers/
         call with the same parameters that were passed into us.

         Note that we have to reverse engineer from our args to get back the
         image, flavor, ipgroup, etc. since the original call could have
-        come in from EC2 (which doesn't use these things)."""
-
+        come in from EC2 (which doesn't use these things).
+        """
         instance_type = request_spec['instance_type']
         instance_properties = request_spec['instance_properties']
@@ -109,30 +108,26 @@ class AbstractScheduler(driver.Scheduler):
         meta = instance_properties['metadata']
         flavor_id = instance_type['flavorid']
         reservation_id = instance_properties['reservation_id']

         files = kwargs['injected_files']
         ipgroup = None  # Not supported in OS API ... yet

         child_zone = zone_info['child_zone']
         child_blob = zone_info['child_blob']
         zone = db.zone_get(context, child_zone)
         url = zone.api_url
         LOG.debug(_("Forwarding instance create call to child zone %(url)s"
-                ". ReservationID=%(reservation_id)s")
-                % locals())
+                ". ReservationID=%(reservation_id)s") % locals())
         nova = None
         try:
             nova = novaclient.Client(zone.username, zone.password, None, url)
             nova.authenticate()
         except novaclient_exceptions.BadRequest, e:
             raise exception.NotAuthorized(_("Bad credentials attempting "
-                    "to talk to zone at %(url)s.") % locals())
-
+                    "to talk to zone at %(url)s.") % locals())
         nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
-                child_blob, reservation_id=reservation_id)
+                            child_blob, reservation_id=reservation_id)

     def _provision_resource_from_blob(self, context, build_plan_item,
-            instance_id, request_spec, kwargs):
+                                      instance_id, request_spec, kwargs):
         """Create the requested resource locally or in a child zone
         based on what is stored in the zone blob info.

@@ -145,8 +140,8 @@ class AbstractScheduler(driver.Scheduler):
         means we gathered the info from one of our children.
         It's possible that, when we decrypt the 'blob' field, it
         contains "child_blob" data. In which case we forward the
-        request."""
-
+        request.
+        """
         host_info = None
         if "blob" in build_plan_item:
             # Request was passed in from above. Is it for us?
@@ -161,21 +156,20 @@ class AbstractScheduler(driver.Scheduler):
             # Valid data ... is it for us?
             if 'child_zone' in host_info and 'child_blob' in host_info:
                 self._ask_child_zone_to_create_instance(context, host_info,
-                        request_spec, kwargs)
+                                                        request_spec, kwargs)
             else:
                 self._provision_resource_locally(context, host_info, request_spec,
-                        kwargs)
+                                                 kwargs)

     def _provision_resource(self, context, build_plan_item, instance_id,
-            request_spec, kwargs):
+                            request_spec, kwargs):
         """Create the requested resource in this Zone or a child zone."""
         if "hostname" in build_plan_item:
             self._provision_resource_locally(context, build_plan_item,
-                    request_spec, kwargs)
+                                             request_spec, kwargs)
             return
-
         self._provision_resource_from_blob(context, build_plan_item,
-                instance_id, request_spec, kwargs)
+                                           instance_id, request_spec, kwargs)

     def _adjust_child_weights(self, child_results, zones):
         """Apply the Scale and Offset values from the Zone definition

@@ -231,7 +225,6 @@ class AbstractScheduler(driver.Scheduler):
         for num in xrange(num_instances):
             if not build_plan:
                 break
-
             build_plan_item = build_plan.pop(0)
             self._provision_resource(context, build_plan_item, instance_id,
                     request_spec, kwargs)
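
For readers tracing the dispatch above: _provision_resource routes on the shape of build_plan_item. A minimal standalone sketch of that rule (the dicts mirror build_plan_item; the provision_* functions are hypothetical stand-ins, not the module's actual methods):

# Sketch: a 'hostname' key means this zone won the bid and provisions
# locally; otherwise the item carries an encrypted 'blob' that must be
# decrypted and either handled here or forwarded to a child zone.
def provision_locally(item):
    return "cast run_instance to %s" % item['hostname']

def provision_from_blob(item):
    return "decrypt blob, then provision locally or forward to child zone"

def provision(item):
    if 'hostname' in item:
        return provision_locally(item)
    return provision_from_blob(item)

assert provision({'hostname': 'host01'}).endswith('host01')
assert provision({'blob': '...ciphertext...'}).startswith('decrypt')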
@@ -43,40 +43,13 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
         # TODO(sandy): We're only using InstanceType-based specs
         # currently. Later we'll need to snoop for more detailed
         # host filter requests.
-        instance_type = request_spec['instance_type']
+        instance_type = request_spec.get("instance_type", None)
+        if instance_type is None:
+            # No way to select; return the specified hosts
+            return hosts or []
         name, query = selected_filter.instance_type_to_filter(instance_type)
         return selected_filter.filter_hosts(self.zone_manager, query)

-    def filter_hosts(self, topic, request_spec, host_list=None):
-        """Return a list of hosts which are acceptable for scheduling.
-        Return value should be a list of (hostname, capability_dict)s.
-        Derived classes may override this, but may find the
-        '<topic>_filter' function more appropriate.
-        """
-        def _default_filter(self, hostname, capabilities, request_spec):
-            """Default filter function if there's no <topic>_filter"""
-            # NOTE(sirp): The default logic is the equivalent to
-            # AllHostsFilter
-            return True
-
-        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
-
-        if host_list is None:
-            first_run = True
-            host_list = self.zone_manager.service_states.iteritems()
-        else:
-            first_run = False
-
-        filtered_hosts = []
-        for host, services in host_list:
-            if first_run:
-                if topic not in services:
-                    continue
-                services = services[topic]
-            if filter_func(host, services, request_spec):
-                filtered_hosts.append((host, services))
-        return filtered_hosts
-
     def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes may override this to provide more sophisticated
         scheduling objectives

@@ -84,18 +57,3 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
         # NOTE(sirp): The default logic is the same as the NoopCostFunction
         return [dict(weight=1, hostname=hostname, capabilities=capabilities)
                 for hostname, capabilities in hosts]
-
-    def compute_consume(self, capabilities, instance_type):
-        """Consume compute resources for selected host"""
-
-        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
-        capabilities['host_memory_free'] -= requested_mem
-
-    def consume_resources(self, topic, capabilities, instance_type):
-        """Consume resources for a specific host. 'host' is a tuple
-        of the hostname and the services"""
-
-        consume_func = getattr(self, '%s_consume' % topic, None)
-        if not consume_func:
-            return
-        consume_func(capabilities, instance_type)
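
The consume_resources method removed above relies on a naming convention worth spelling out: handlers are resolved per topic via getattr, so a scheduler only consumes resources for topics that define a '<topic>_consume' method (the same convention the '<topic>_filter' lookup uses). A small self-contained sketch of the pattern, with made-up data:

class TopicDispatchSketch(object):
    # Mirrors BaseScheduler.compute_consume: debit the requested RAM.
    def compute_consume(self, capabilities, instance_type):
        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
        capabilities['host_memory_free'] -= requested_mem

    def consume_resources(self, topic, capabilities, instance_type):
        consume_func = getattr(self, '%s_consume' % topic, None)
        if not consume_func:
            return  # no handler for this topic (e.g. 'volume'): a no-op
        consume_func(capabilities, instance_type)

caps = {'host_memory_free': 4 * 1024 * 1024 * 1024}
TopicDispatchSketch().consume_resources('compute', caps, {'memory_mb': 512})
assert caps['host_memory_free'] == (4 * 1024 - 512) * 1024 * 1024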
@@ -20,43 +20,32 @@ either incompatible or insufficient to accept a newly-requested instance
 are removed by Host Filter classes from consideration. Those that pass
 the filter are then passed on for weighting or other process for ordering.

 Three filters are included: AllHosts, Flavor & JSON. AllHosts just
 returns the full, unfiltered list of hosts. Flavor is a hard coded
 matching mechanism based on flavor criteria and JSON is an ad-hoc
 filter grammar.

 Why JSON? The requests for instances may come in through the
 REST interface from a user or a parent Zone.
 Currently Flavors and/or InstanceTypes are used for
 specifying the type of instance desired. Specific Nova users have
 noted a need for a more expressive way of specifying instances.
 Since we don't want to get into building a full DSL, this is a simple
 form as an example of how this could be done. In reality, most
 consumers will use the more rigid filters such as FlavorFilter.
 Filters are in the 'filters' directory that is off the 'scheduler'
 directory of nova. Additional filters can be created and added to that
 directory; be sure to add them to the filters/__init__.py file so that
 they are part of the nova.schedulers.filters namespace.
 """

 import json
 import types

 from nova import exception
 from nova import flags
 from nova import log as logging

 import nova.scheduler


 LOG = logging.getLogger('nova.scheduler.host_filter')
 FLAGS = flags.FLAGS


 def _get_filters():
     # Imported here to avoid circular imports
     from nova.scheduler import filters

     def get_itm(nm):
         return getattr(filters, nm)

     return [get_itm(itm) for itm in dir(filters)
             if (type(get_itm(itm)) is types.TypeType)
-            and issubclass(get_itm(itm), filters.AbstractHostFilter)]
+            and issubclass(get_itm(itm), filters.AbstractHostFilter)
+            and get_itm(itm) is not filters.AbstractHostFilter]


 def choose_host_filter(filter_name=None):
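
To make the docstring's "ad-hoc filter grammar" concrete: a JSON query is a nested list of [op, args...], where '$compute.<key>' resolves to a host capability, as exercised by the deleted tests at the bottom of this diff. A rough standalone evaluator for the handful of operators those tests use (an illustration of the grammar only, not JsonFilter's actual implementation):

import json

def evaluate(query, caps):
    op, args = query[0], query[1:]

    def resolve(arg):
        if isinstance(arg, list):
            return evaluate(arg, caps)  # nested sub-expression
        if isinstance(arg, str) and arg.startswith('$compute.'):
            return caps.get(arg[len('$compute.'):])  # capability lookup
        return arg  # literal value
    vals = [resolve(arg) for arg in args]
    ops = {'and': lambda v: all(v),
           'or': lambda v: any(v),
           'not': lambda v: not any(v),
           '<': lambda v: v[0] < v[1],
           '>': lambda v: v[0] > v[1],
           '=': lambda v: v[0] == v[1],
           'in': lambda v: v[0] in v[1:]}
    return ops[op](vals)  # an unknown op raises KeyError, as the tests expect

query = json.loads(json.dumps(
    ['and', ['<', '$compute.host_memory_free', 30],
            ['<', '$compute.disk_available', 300]]))
assert evaluate(query, {'host_memory_free': 20, 'disk_available': 200})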
@@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost
 is then selected for provisioning.
 """

 # TODO(dabo): This class will be removed in the next merge prop; it remains now
 # because much of the code will be refactored into different classes.

 import collections

 from nova import flags
 from nova import log as logging
-from nova.scheduler import abstract_scheduler
+from nova.scheduler import base_scheduler
 from nova import utils
 from nova import exception

@@ -37,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost')

 FLAGS = flags.FLAGS
 flags.DEFINE_list('least_cost_scheduler_cost_functions',
-        ['nova.scheduler.least_cost.noop_cost_fn'],
-        'Which cost functions the LeastCostScheduler should use.')
+                  ['nova.scheduler.least_cost.noop_cost_fn'],
+                  'Which cost functions the LeastCostScheduler should use.')


 # TODO(sirp): Once we have enough of these rules, we can break them out into a
 # cost_functions.py file (perhaps in a least_cost_scheduler directory)
 flags.DEFINE_integer('noop_cost_fn_weight', 1,
-        'How much weight to give the noop cost function')
+                     'How much weight to give the noop cost function')
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
+                     'How much weight to give the fill-first cost function')


 def noop_cost_fn(host):

@@ -52,87 +52,20 @@ def noop_cost_fn(host):
     return 1


-flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
-        'How much weight to give the fill-first cost function')
-
-
 def compute_fill_first_cost_fn(host):
     """Prefer hosts that have less ram available, filter_hosts will exclude
-    hosts that don't have enough ram"""
-    hostname, caps = host
-    free_mem = caps['host_memory_free']
+    hosts that don't have enough ram.
+    """
+    hostname, service = host
+    caps = service.get("compute", {})
+    free_mem = caps.get("host_memory_free", 0)
     return free_mem


-class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
-    def __init__(self, *args, **kwargs):
-        self.cost_fns_cache = {}
-        super(LeastCostScheduler, self).__init__(*args, **kwargs)
-
-    def get_cost_fns(self, topic):
-        """Returns a list of tuples containing weights and cost functions to
-        use for weighing hosts
-        """
-        if topic in self.cost_fns_cache:
-            return self.cost_fns_cache[topic]
-
-        cost_fns = []
-        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
-            if '.' in cost_fn_str:
-                short_name = cost_fn_str.split('.')[-1]
-            else:
-                short_name = cost_fn_str
-                cost_fn_str = "%s.%s.%s" % (
-                        __name__, self.__class__.__name__, short_name)
-
-            if not (short_name.startswith('%s_' % topic) or
-                    short_name.startswith('noop')):
-                continue
-
-            try:
-                # NOTE(sirp): import_class is somewhat misnamed since it can
-                # import any callable from a module
-                cost_fn = utils.import_class(cost_fn_str)
-            except exception.ClassNotFound:
-                raise exception.SchedulerCostFunctionNotFound(
-                        cost_fn_str=cost_fn_str)
-
-            try:
-                flag_name = "%s_weight" % cost_fn.__name__
-                weight = getattr(FLAGS, flag_name)
-            except AttributeError:
-                raise exception.SchedulerWeightFlagNotFound(
-                        flag_name=flag_name)
-
-            cost_fns.append((weight, cost_fn))
-
-        self.cost_fns_cache[topic] = cost_fns
-        return cost_fns
-
-    def weigh_hosts(self, topic, request_spec, hosts):
-        """Returns a list of dictionaries of form:
-        [ {weight: weight, hostname: hostname, capabilities: capabs} ]
-        """
-        cost_fns = self.get_cost_fns(topic)
-        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
-
-        weighted = []
-        weight_log = []
-        for cost, (hostname, caps) in zip(costs, hosts):
-            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
-            weight_dict = dict(weight=cost, hostname=hostname,
-                    capabilities=caps)
-            weighted.append(weight_dict)
-
-        LOG.debug(_("Weighted Costs => %s") % weight_log)
-        return weighted
-
-
 def normalize_list(L):
     """Normalize an array of numbers such that each element satisfies:
-    0 <= e <= 1"""
+    0 <= e <= 1
+    """
     if not L:
         return L
     max_ = max(L)
@@ -160,12 +93,10 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     score_table = collections.defaultdict(list)
     for weight, fn in weighted_fns:
         scores = [fn(elem) for elem in domain]
-
         if normalize:
             norm_scores = normalize_list(scores)
         else:
             norm_scores = scores
-
         for idx, score in enumerate(norm_scores):
             weighted_score = score * weight
             score_table[idx].append(weighted_score)

@@ -175,5 +106,66 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     for idx in sorted(score_table):
         elem_score = sum(score_table[idx])
         domain_scores.append(elem_score)
-
     return domain_scores
+
+
+class LeastCostScheduler(base_scheduler.BaseScheduler):
+    def __init__(self, *args, **kwargs):
+        self.cost_fns_cache = {}
+        super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+    def get_cost_fns(self, topic):
+        """Returns a list of tuples containing weights and cost functions to
+        use for weighing hosts
+        """
+        if topic in self.cost_fns_cache:
+            return self.cost_fns_cache[topic]
+        cost_fns = []
+        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+            if '.' in cost_fn_str:
+                short_name = cost_fn_str.split('.')[-1]
+            else:
+                short_name = cost_fn_str
+                cost_fn_str = "%s.%s.%s" % (
+                        __name__, self.__class__.__name__, short_name)
+            if not (short_name.startswith('%s_' % topic) or
+                    short_name.startswith('noop')):
+                continue
+
+            try:
+                # NOTE(sirp): import_class is somewhat misnamed since it can
+                # import any callable from a module
+                cost_fn = utils.import_class(cost_fn_str)
+            except exception.ClassNotFound:
+                raise exception.SchedulerCostFunctionNotFound(
+                        cost_fn_str=cost_fn_str)
+
+            try:
+                flag_name = "%s_weight" % cost_fn.__name__
+                weight = getattr(FLAGS, flag_name)
+            except AttributeError:
+                raise exception.SchedulerWeightFlagNotFound(
+                        flag_name=flag_name)
+            cost_fns.append((weight, cost_fn))
+
+        self.cost_fns_cache[topic] = cost_fns
+        return cost_fns
+
+    def weigh_hosts(self, topic, request_spec, hosts):
+        """Returns a list of dictionaries of form:
+        [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+        """
+        cost_fns = self.get_cost_fns(topic)
+        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
+
+        weighted = []
+        weight_log = []
+        for cost, (hostname, service) in zip(costs, hosts):
+            caps = service[topic]
+            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
+            weight_dict = dict(weight=cost, hostname=hostname,
+                    capabilities=caps)
+            weighted.append(weight_dict)
+
+        LOG.debug(_("Weighted Costs => %s") % weight_log)
+        return weighted
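
The weighing pipeline in this file composes three pieces: each cost function scores every host, normalize_list rescales each score list into [0, 1], and weighted_sum multiplies by the per-function flag weight and sums per host; the least-cost host wins. A worked, self-contained example with made-up capabilities (simplified mirrors of the functions above, not imports of them):

import collections

def normalize(scores):
    max_ = max(scores)
    return [score / float(max_) for score in scores] if max_ else scores

def weighted_sum(domain, weighted_fns):
    score_table = collections.defaultdict(list)
    for weight, fn in weighted_fns:
        for idx, score in enumerate(normalize([fn(elem) for elem in domain])):
            score_table[idx].append(score * weight)
    return [sum(score_table[idx]) for idx in sorted(score_table)]

def fill_first(host):
    hostname, caps = host
    return caps['host_memory_free']  # cheaper = less free RAM

def noop(host):
    return 1

hosts = [('host01', {'host_memory_free': 10}),
         ('host02', {'host_memory_free': 50}),
         ('host03', {'host_memory_free': 100})]
costs = weighted_sum(hosts, [(1, fill_first), (1, noop)])
# fill_first normalizes to [0.1, 0.5, 1.0] and noop to [1, 1, 1], so the
# summed costs are [1.1, 1.5, 2.0]: host01, with the least free RAM, wins.
assert [round(cost, 6) for cost in costs] == [1.1, 1.5, 2.0]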
@@ -192,9 +192,7 @@ class HostFilterTestCase(test.TestCase):
             msg = " ".join([str(arg) for arg in args])
             dbg.write("%s\n" % msg)

-        debug("cooked", cooked, type(cooked))
         hosts = hf.filter_hosts(self.zone_manager, cooked)
-
         self.assertEquals(5, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()
@@ -15,6 +15,7 @@
 """
 Tests For Least Cost Scheduler
 """
+import copy

 from nova import test
 from nova.scheduler import least_cost
@@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
         super(LeastCostSchedulerTestCase, self).tearDown()

     def assertWeights(self, expected, num, request_spec, hosts):
-        weighted = self.sched.weigh_hosts(num, request_spec, hosts)
+        weighted = self.sched.weigh_hosts("compute", request_spec, hosts)
         self.assertDictListMatch(weighted, expected, approx_equal=True)

     def test_no_hosts(self):
@@ -125,19 +126,20 @@ class LeastCostSchedulerTestCase(test.TestCase):
         num = 1
         instance_type = {'memory_mb': 1024}
         request_spec = {'instance_type': instance_type}
-        all_hosts = self.sched.zone_manager.service_states.iteritems()
+        svc_states = self.sched.zone_manager.service_states.iteritems()
         all_hosts = [(host, services["compute"])
-                for host, services in all_hosts
+                for host, services in svc_states
                 if "compute" in services]
-        hosts = self.sched.filter_hosts('compute', request_spec, host_list)
+        hosts = self.sched.filter_hosts('compute', request_spec, all_hosts)

         expected = []
-        for idx, (hostname, caps) in enumerate(hosts):
+        for idx, (hostname, services) in enumerate(hosts):
+            caps = copy.deepcopy(services["compute"])
             # Costs are normalized so over 10 hosts, each host with increasing
             # free ram will cost 1/N more. Since the lowest cost host has some
             # free ram, we add in the 1/N for the base_cost
             weight = 0.1 + (0.1 * idx)
-            weight_dict = dict(weight=weight, hostname=hostname)
-            expected.append(weight_dict)
+            wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps)
+            expected.append(wtd_dict)

         self.assertWeights(expected, num, request_spec, hosts)
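
The expected weights follow from the fixture data (assuming the fake zone manager's states use the same 10 + 10 * idx host_memory_free pattern as the host-filter fixtures later in this diff, i.e. 10 for host01 up to 100 for host10): compute_fill_first_cost_fn returns free RAM and normalize_list divides by the maximum, so the host at index idx costs (10 + 10 * idx) / 100 = 0.1 + 0.1 * idx; the cheapest host still has some free RAM, hence the 0.1 base cost rather than 0. Checking that arithmetic standalone, with the same numbers:

free_mem = [10 + 10 * idx for idx in range(10)]  # host01 .. host10
normalized = [mem / float(max(free_mem)) for mem in free_mem]
assert ([round(cost, 6) for cost in normalized] ==
        [round(0.1 + 0.1 * idx, 6) for idx in range(10)])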
@@ -1,200 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler Host Filters.
-"""
-
-import json
-
-from nova import exception
-from nova import test
-from nova.scheduler import host_filter
-
-
-class FakeZoneManager:
-    pass
-
-
-class HostFilterTestCase(test.TestCase):
-    """Test case for host filters."""
-
-    def _host_caps(self, multiplier):
-        # Returns host capabilities in the following way:
-        # host1 = memory:free 10 (100max)
-        #         disk:available 100 (1000max)
-        # hostN = memory:free 10 + 10N
-        #         disk:available 100 + 100N
-        # in other words: hostN has more resources than host0
-        # which means ... don't go above 10 hosts.
-        return {'host_name-description': 'XenServer %s' % multiplier,
-                'host_hostname': 'xs-%s' % multiplier,
-                'host_memory_total': 100,
-                'host_memory_overhead': 10,
-                'host_memory_free': 10 + multiplier * 10,
-                'host_memory_free-computed': 10 + multiplier * 10,
-                'host_other-config': {},
-                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
-                'host_cpu_info': {},
-                'disk_available': 100 + multiplier * 100,
-                'disk_total': 1000,
-                'disk_used': 0,
-                'host_uuid': 'xxx-%d' % multiplier,
-                'host_name-label': 'xs-%s' % multiplier}
-
-    def setUp(self):
-        super(HostFilterTestCase, self).setUp()
-        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
-        self.flags(default_host_filter=default_host_filter)
-        self.instance_type = dict(name='tiny',
-                memory_mb=50,
-                vcpus=10,
-                local_gb=500,
-                flavorid=1,
-                swap=500,
-                rxtx_quota=30000,
-                rxtx_cap=200,
-                extra_specs={})
-
-        self.zone_manager = FakeZoneManager()
-        states = {}
-        for x in xrange(10):
-            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
-        self.zone_manager.service_states = states
-
-    def test_choose_filter(self):
-        # Test default filter ...
-        hf = host_filter.choose_host_filter()
-        self.assertEquals(hf._full_name(),
-                'nova.scheduler.host_filter.AllHostsFilter')
-        # Test valid filter ...
-        hf = host_filter.choose_host_filter(
-                'nova.scheduler.host_filter.InstanceTypeFilter')
-        self.assertEquals(hf._full_name(),
-                'nova.scheduler.host_filter.InstanceTypeFilter')
-        # Test invalid filter ...
-        try:
-            host_filter.choose_host_filter('does not exist')
-            self.fail("Should not find host filter.")
-        except exception.SchedulerHostFilterNotFound:
-            pass
-
-    def test_all_host_filter(self):
-        hf = host_filter.AllHostsFilter()
-        cooked = hf.instance_type_to_filter(self.instance_type)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-        self.assertEquals(10, len(hosts))
-        for host, capabilities in hosts:
-            self.assertTrue(host.startswith('host'))
-
-    def test_instance_type_filter(self):
-        hf = host_filter.InstanceTypeFilter()
-        # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
-                name)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-        self.assertEquals(6, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        self.assertEquals('host05', just_hosts[0])
-        self.assertEquals('host10', just_hosts[5])
-
-    def test_json_filter(self):
-        hf = host_filter.JsonFilter()
-        # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-        self.assertEquals(6, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        self.assertEquals('host05', just_hosts[0])
-        self.assertEquals('host10', just_hosts[5])
-
-        # Try some custom queries
-
-        raw = ['or',
-                ['and',
-                    ['<', '$compute.host_memory_free', 30],
-                    ['<', '$compute.disk_available', 300],
-                ],
-                ['and',
-                    ['>', '$compute.host_memory_free', 70],
-                    ['>', '$compute.disk_available', 700],
-                ],
-              ]
-        cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-
-        self.assertEquals(5, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
-            self.assertEquals('host%02d' % index, host)
-
-        raw = ['not',
-                ['=', '$compute.host_memory_free', 30],
-              ]
-        cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-
-        self.assertEquals(9, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
-            self.assertEquals('host%02d' % index, host)
-
-        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
-        cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-
-        self.assertEquals(5, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
-            self.assertEquals('host%02d' % index, host)
-
-        # Try some bogus input ...
-        raw = ['unknown command', ]
-        cooked = json.dumps(raw)
-        try:
-            hf.filter_hosts(self.zone_manager, cooked)
-            self.fail("Should give KeyError")
-        except KeyError, e:
-            pass
-
-        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
-        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
-        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
-                ['not', True, False, True, False])))
-
-        try:
-            hf.filter_hosts(self.zone_manager, json.dumps(
-                    'not', True, False, True, False))
-            self.fail("Should give KeyError")
-        except KeyError, e:
-            pass
-
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(['=', '$foo', 100])))
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(['=', '$.....', 100])))
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(
-                    ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
-
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(['=', {}, ['>', '$missing....foo']])))