got tests passing with logic changes
@@ -52,7 +52,6 @@ class AbstractScheduler(driver.Scheduler):
     """Base class for creating Schedulers that can work across any nova
     deployment, from simple designs to multiply-nested zones.
     """
-
     def _call_zone_method(self, context, method, specs, zones):
         """Call novaclient zone method. Broken out for testing."""
         return api.call_zone_method(context, method, specs=specs, zones=zones)
@@ -73,16 +72,16 @@ class AbstractScheduler(driver.Scheduler):
         instance_id = instance['id']
         kwargs['instance_id'] = instance_id

-        rpc.cast(context,
-                 db.queue_get_for(context, "compute", host),
-                 {"method": "run_instance",
-                  "args": kwargs})
+        queue = db.queue_get_for(context, "compute", host)
+        params = {"method": "run_instance", "args": kwargs}
+        rpc.cast(context, queue, params)
         LOG.debug(_("Provisioning locally via compute node %(host)s")
                   % locals())

     def _decrypt_blob(self, blob):
         """Returns the decrypted blob or None if invalid. Broken out
-        for testing."""
+        for testing.
+        """
         decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
         try:
             json_entry = decryptor(blob)
@@ -99,8 +98,8 @@ class AbstractScheduler(driver.Scheduler):

         Note that we have to reverse engineer from our args to get back the
         image, flavor, ipgroup, etc. since the original call could have
-        come in from EC2 (which doesn't use these things)."""
-
+        come in from EC2 (which doesn't use these things).
+        """
         instance_type = request_spec['instance_type']
         instance_properties = request_spec['instance_properties']

@@ -109,17 +108,14 @@ class AbstractScheduler(driver.Scheduler):
         meta = instance_properties['metadata']
         flavor_id = instance_type['flavorid']
         reservation_id = instance_properties['reservation_id']
-
         files = kwargs['injected_files']
         ipgroup = None # Not supported in OS API ... yet
-
         child_zone = zone_info['child_zone']
         child_blob = zone_info['child_blob']
         zone = db.zone_get(context, child_zone)
         url = zone.api_url
         LOG.debug(_("Forwarding instance create call to child zone %(url)s"
-                    ". ReservationID=%(reservation_id)s")
-                    % locals())
+                    ". ReservationID=%(reservation_id)s") % locals())
         nova = None
         try:
             nova = novaclient.Client(zone.username, zone.password, None, url)
@@ -127,7 +123,6 @@ class AbstractScheduler(driver.Scheduler):
         except novaclient_exceptions.BadRequest, e:
             raise exception.NotAuthorized(_("Bad credentials attempting "
                     "to talk to zone at %(url)s.") % locals())
-
         nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
                 child_blob, reservation_id=reservation_id)

@@ -145,8 +140,8 @@ class AbstractScheduler(driver.Scheduler):
         means we gathered the info from one of our children.
         It's possible that, when we decrypt the 'blob' field, it
         contains "child_blob" data. In which case we forward the
-        request."""
-
+        request.
+        """
         host_info = None
         if "blob" in build_plan_item:
             # Request was passed in from above. Is it for us?
@@ -173,7 +168,6 @@ class AbstractScheduler(driver.Scheduler):
             self._provision_resource_locally(context, build_plan_item,
                     request_spec, kwargs)
             return
-
         self._provision_resource_from_blob(context, build_plan_item,
                 instance_id, request_spec, kwargs)

@@ -231,7 +225,6 @@ class AbstractScheduler(driver.Scheduler):
         for num in xrange(num_instances):
             if not build_plan:
                 break
-
             build_plan_item = build_plan.pop(0)
             self._provision_resource(context, build_plan_item, instance_id,
                     request_spec, kwargs)
@@ -43,40 +43,13 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
         # TODO(sandy): We're only using InstanceType-based specs
         # currently. Later we'll need to snoop for more detailed
         # host filter requests.
-        instance_type = request_spec['instance_type']
+        instance_type = request_spec.get("instance_type", None)
+        if instance_type is None:
+            # No way to select; return the specified hosts
+            return hosts or []
         name, query = selected_filter.instance_type_to_filter(instance_type)
         return selected_filter.filter_hosts(self.zone_manager, query)

-    def filter_hosts(self, topic, request_spec, host_list=None):
-        """Return a list of hosts which are acceptable for scheduling.
-        Return value should be a list of (hostname, capability_dict)s.
-        Derived classes may override this, but may find the
-        '<topic>_filter' function more appropriate.
-        """
-        def _default_filter(self, hostname, capabilities, request_spec):
-            """Default filter function if there's no <topic>_filter"""
-            # NOTE(sirp): The default logic is the equivalent to
-            # AllHostsFilter
-            return True
-
-        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
-
-        if host_list is None:
-            first_run = True
-            host_list = self.zone_manager.service_states.iteritems()
-        else:
-            first_run = False
-
-        filtered_hosts = []
-        for host, services in host_list:
-            if first_run:
-                if topic not in services:
-                    continue
-                services = services[topic]
-            if filter_func(host, services, request_spec):
-                filtered_hosts.append((host, services))
-        return filtered_hosts
-
     def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes may override this to provide more sophisticated
         scheduling objectives
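Note on the hunk above: with the new request_spec.get("instance_type", None) guard, a spec that arrives without an instance type (for example one forwarded from EC2, which a docstring earlier in this diff says does not use these things) no longer raises KeyError; the passed-in hosts are returned unchanged. A condensed sketch of the two paths, where pick_hosts and its parameters are illustrative names rather than the real method signature:

def pick_hosts(request_spec, hosts, selected_filter, zone_manager):
    # Illustrative restatement of the filtering path in the hunk above.
    instance_type = request_spec.get("instance_type", None)
    if instance_type is None:
        # No way to select; return the specified hosts
        return hosts or []
    name, query = selected_filter.instance_type_to_filter(instance_type)
    return selected_filter.filter_hosts(zone_manager, query)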
@@ -84,18 +57,3 @@ class BaseScheduler(abstract_scheduler.AbstractScheduler):
         # NOTE(sirp): The default logic is the same as the NoopCostFunction
         return [dict(weight=1, hostname=hostname, capabilities=capabilities)
                 for hostname, capabilities in hosts]
-
-    def compute_consume(self, capabilities, instance_type):
-        """Consume compute resources for selected host"""
-
-        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
-        capabilities['host_memory_free'] -= requested_mem
-
-    def consume_resources(self, topic, capabilities, instance_type):
-        """Consume resources for a specific host. 'host' is a tuple
-        of the hostname and the services"""
-
-        consume_func = getattr(self, '%s_consume' % topic, None)
-        if not consume_func:
-            return
-        consume_func(capabilities, instance_type)
@@ -13,6 +13,23 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+"""
+There are three filters included: AllHosts, InstanceType & JSON.
+
+AllHosts just returns the full, unfiltered list of hosts.
+InstanceType is a hard coded matching mechanism based on flavor criteria.
+JSON is an ad-hoc filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently InstanceTypes are used for specifing the type of instance desired.
+Specific Nova users have noted a need for a more expressive way of specifying
+instance requirements. Since we don't want to get into building full DSL,
+this filter is a simple form as an example of how this could be done.
+In reality, most consumers will use the more rigid filters such as the
+InstanceType filter.
+"""
+
 from abstract_filter import AbstractHostFilter
 from all_hosts_filter import AllHostsFilter
 from instance_type_filter import InstanceTypeFilter
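For context, the grammar the new docstring refers to is exercised by the host-filter tests removed at the end of this diff: a query is a nested list whose first element is an operator and whose remaining elements are operands, with strings of the form $<topic>.<capability> resolved against each host's capabilities. A sketch of one such query, serialized the way the tests hand it to JsonFilter.filter_hosts (the numeric values are illustrative):

import json

# Pass hosts with little free ram AND little disk, or lots of free ram
# AND lots of disk (shape taken from the removed tests).
raw = ['or',
       ['and',
        ['<', '$compute.host_memory_free', 30],
        ['<', '$compute.disk_available', 300]],
       ['and',
        ['>', '$compute.host_memory_free', 70],
        ['>', '$compute.disk_available', 700]]]
query = json.dumps(raw)   # JsonFilter expects the JSON-serialized form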
@@ -19,7 +19,7 @@ from nova import flags

 FLAGS = flags.FLAGS
 flags.DEFINE_string('default_host_filter',
-                    'nova.scheduler.filters.AllHostsFilter',
+                    'AllHostsFilter',
                     'Which filter to use for filtering hosts')

 class AbstractHostFilter(object):
@@ -20,11 +20,6 @@ import operator
 import nova.scheduler
 from nova.scheduler.filters import abstract_filter

-def debug(*args):
-    with file("/tmp/debug", "a") as dbg:
-        msg = " ".join([str(arg) for arg in args])
-        dbg.write("%s\n" % msg)
-

 class JsonFilter(abstract_filter.AbstractHostFilter):
     """Host Filter to allow simple JSON-based grammar for
@@ -38,12 +33,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
         if len(args) < 2:
             return False
         if op is operator.contains:
-            debug("ARGS", type(args), args)
-            debug("op", op)
-            debug("REVERSED!!!")
-            # operator.contains reverses the param order.
-            bad = [arg for arg in args[1:]
-                   if not op(args, args[0])]
+            bad = not args[0] in args[1:]
         else:
             bad = [arg for arg in args[1:]
                    if not op(args[0], arg)]
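This hunk is the logic change the commit title refers to. In the standard library, operator.contains(container, item) is equivalent to item in container, so the deleted comprehension passed its arguments in the wrong order; the replacement line tests membership of args[0] among the remaining operands directly. A small sketch of what the new branch computes, using the 'in' query shape from the removed tests (the free-memory value is illustrative):

import operator

# Query shape: ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100].
# After the capability reference is resolved, args[0] is the host's value
# and args[1:] are the allowed values.
args = [40, 20, 40, 60, 80, 100]
bad = not args[0] in args[1:]                 # the replacement line
assert bad is False                           # 40 is allowed, so the host passes
assert operator.contains(args[1:], args[0])   # contains(container, item)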
@@ -144,8 +134,6 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
         specified in the query.
         """
         expanded = json.loads(query)
-
-        debug("expanded", type(expanded), expanded)
         filtered_hosts = []
         for host, services in zone_manager.service_states.iteritems():
             result = self._process_filter(zone_manager, expanded, host,
@@ -20,43 +20,32 @@ either incompatible or insufficient to accept a newly-requested instance
 are removed by Host Filter classes from consideration. Those that pass
 the filter are then passed on for weighting or other process for ordering.

-Three filters are included: AllHosts, Flavor & JSON. AllHosts just
-returns the full, unfiltered list of hosts. Flavor is a hard coded
-matching mechanism based on flavor criteria and JSON is an ad-hoc
-filter grammar.
-
-Why JSON? The requests for instances may come in through the
-REST interface from a user or a parent Zone.
-Currently Flavors and/or InstanceTypes are used for
-specifing the type of instance desired. Specific Nova users have
-noted a need for a more expressive way of specifying instances.
-Since we don't want to get into building full DSL this is a simple
-form as an example of how this could be done. In reality, most
-consumers will use the more rigid filters such as FlavorFilter.
+Filters are in the 'filters' directory that is off the 'scheduler'
+directory of nova. Additional filters can be created and added to that
+directory; be sure to add them to the filters/__init__.py file so that
+they are part of the nova.schedulers.filters namespace.
 """

-import json
 import types

 from nova import exception
 from nova import flags
-from nova import log as logging

 import nova.scheduler


-LOG = logging.getLogger('nova.scheduler.host_filter')
 FLAGS = flags.FLAGS


 def _get_filters():
+    # Imported here to avoid circular imports
     from nova.scheduler import filters
     def get_itm(nm):
         return getattr(filters, nm)
-
     return [get_itm(itm) for itm in dir(filters)
             if (type(get_itm(itm)) is types.TypeType)
-            and issubclass(get_itm(itm), filters.AbstractHostFilter)]
+            and issubclass(get_itm(itm), filters.AbstractHostFilter)
+            and get_itm(itm) is not filters.AbstractHostFilter]


 def choose_host_filter(filter_name=None):
@@ -22,14 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost
 is then selected for provisioning.
 """

-# TODO(dabo): This class will be removed in the next merge prop; it remains now
-# because much of the code will be refactored into different classes.

 import collections

 from nova import flags
 from nova import log as logging
-from nova.scheduler import abstract_scheduler
+from nova.scheduler import base_scheduler
 from nova import utils
 from nova import exception

@@ -45,6 +43,8 @@ flags.DEFINE_list('least_cost_scheduler_cost_functions',
 # cost_functions.py file (perhaps in a least_cost_scheduler directory)
 flags.DEFINE_integer('noop_cost_fn_weight', 1,
                      'How much weight to give the noop cost function')
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
+                     'How much weight to give the fill-first cost function')


 def noop_cost_fn(host):
@@ -52,87 +52,20 @@ def noop_cost_fn(host):
     return 1


-flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
-                     'How much weight to give the fill-first cost function')
-
-
 def compute_fill_first_cost_fn(host):
     """Prefer hosts that have less ram available, filter_hosts will exclude
-    hosts that don't have enough ram"""
-    hostname, caps = host
-    free_mem = caps['host_memory_free']
+    hosts that don't have enough ram.
+    """
+    hostname, service = host
+    caps = service.get("compute", {})
+    free_mem = caps.get("host_memory_free", 0)
     return free_mem


-class LeastCostScheduler(abstract_scheduler.AbstractScheduler):
-    def __init__(self, *args, **kwargs):
-        self.cost_fns_cache = {}
-        super(LeastCostScheduler, self).__init__(*args, **kwargs)
-
-    def get_cost_fns(self, topic):
-        """Returns a list of tuples containing weights and cost functions to
-        use for weighing hosts
-        """
-
-        if topic in self.cost_fns_cache:
-            return self.cost_fns_cache[topic]
-
-        cost_fns = []
-        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
-            if '.' in cost_fn_str:
-                short_name = cost_fn_str.split('.')[-1]
-            else:
-                short_name = cost_fn_str
-                cost_fn_str = "%s.%s.%s" % (
-                        __name__, self.__class__.__name__, short_name)
-
-            if not (short_name.startswith('%s_' % topic) or
-                    short_name.startswith('noop')):
-                continue
-
-            try:
-                # NOTE(sirp): import_class is somewhat misnamed since it can
-                # any callable from a module
-                cost_fn = utils.import_class(cost_fn_str)
-            except exception.ClassNotFound:
-                raise exception.SchedulerCostFunctionNotFound(
-                        cost_fn_str=cost_fn_str)
-
-            try:
-                flag_name = "%s_weight" % cost_fn.__name__
-                weight = getattr(FLAGS, flag_name)
-            except AttributeError:
-                raise exception.SchedulerWeightFlagNotFound(
-                        flag_name=flag_name)
-
-            cost_fns.append((weight, cost_fn))
-
-        self.cost_fns_cache[topic] = cost_fns
-        return cost_fns
-
-    def weigh_hosts(self, topic, request_spec, hosts):
-        """Returns a list of dictionaries of form:
-           [ {weight: weight, hostname: hostname, capabilities: capabs} ]
-        """
-
-        cost_fns = self.get_cost_fns(topic)
-        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
-
-        weighted = []
-        weight_log = []
-        for cost, (hostname, caps) in zip(costs, hosts):
-            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
-            weight_dict = dict(weight=cost, hostname=hostname,
-                               capabilities=caps)
-            weighted.append(weight_dict)
-
-        LOG.debug(_("Weighted Costs => %s") % weight_log)
-        return weighted
-
-
 def normalize_list(L):
     """Normalize an array of numbers such that each element satisfies:
-       0 <= e <= 1"""
+       0 <= e <= 1
+    """
     if not L:
         return L
     max_ = max(L)
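The reworked cost function no longer indexes a bare capability dictionary; it unpacks a (hostname, service_dict) pair, looks up the 'compute' entry, and falls back to 0 when the data is missing. A minimal sketch of that behaviour, restating the function body shown above with an illustrative host tuple shaped like a ZoneManager.service_states item:

def compute_fill_first_cost_fn(host):
    # Restated from the hunk above for illustration only.
    hostname, service = host
    caps = service.get("compute", {})
    return caps.get("host_memory_free", 0)

assert compute_fill_first_cost_fn(('host03', {'compute': {'host_memory_free': 30}})) == 30
assert compute_fill_first_cost_fn(('host99', {})) == 0   # no compute data costs 0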
@@ -160,12 +93,10 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     score_table = collections.defaultdict(list)
     for weight, fn in weighted_fns:
         scores = [fn(elem) for elem in domain]
-
         if normalize:
             norm_scores = normalize_list(scores)
         else:
             norm_scores = scores
-
         for idx, score in enumerate(norm_scores):
             weighted_score = score * weight
             score_table[idx].append(weighted_score)
@@ -175,5 +106,66 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     for idx in sorted(score_table):
         elem_score = sum(score_table[idx])
         domain_scores.append(elem_score)
-
     return domain_scores
+
+
+class LeastCostScheduler(base_scheduler.BaseScheduler):
+    def __init__(self, *args, **kwargs):
+        self.cost_fns_cache = {}
+        super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+    def get_cost_fns(self, topic):
+        """Returns a list of tuples containing weights and cost functions to
+        use for weighing hosts
+        """
+        if topic in self.cost_fns_cache:
+            return self.cost_fns_cache[topic]
+        cost_fns = []
+        for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+            if '.' in cost_fn_str:
+                short_name = cost_fn_str.split('.')[-1]
+            else:
+                short_name = cost_fn_str
+                cost_fn_str = "%s.%s.%s" % (
+                        __name__, self.__class__.__name__, short_name)
+            if not (short_name.startswith('%s_' % topic) or
+                    short_name.startswith('noop')):
+                continue
+
+            try:
+                # NOTE(sirp): import_class is somewhat misnamed since it can
+                # any callable from a module
+                cost_fn = utils.import_class(cost_fn_str)
+            except exception.ClassNotFound:
+                raise exception.SchedulerCostFunctionNotFound(
+                        cost_fn_str=cost_fn_str)
+
+            try:
+                flag_name = "%s_weight" % cost_fn.__name__
+                weight = getattr(FLAGS, flag_name)
+            except AttributeError:
+                raise exception.SchedulerWeightFlagNotFound(
+                        flag_name=flag_name)
+            cost_fns.append((weight, cost_fn))
+
+        self.cost_fns_cache[topic] = cost_fns
+        return cost_fns
+
+    def weigh_hosts(self, topic, request_spec, hosts):
+        """Returns a list of dictionaries of form:
+           [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+        """
+        cost_fns = self.get_cost_fns(topic)
+        costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
+
+        weighted = []
+        weight_log = []
+        for cost, (hostname, service) in zip(costs, hosts):
+            caps = service[topic]
+            weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
+            weight_dict = dict(weight=cost, hostname=hostname,
+                               capabilities=caps)
+            weighted.append(weight_dict)
+
+        LOG.debug(_("Weighted Costs => %s") % weight_log)
+        return weighted
@@ -192,9 +192,7 @@ class HostFilterTestCase(test.TestCase):
             msg = " ".join([str(arg) for arg in args])
             dbg.write("%s\n" % msg)

-        debug("cooked", cooked, type(cooked))
         hosts = hf.filter_hosts(self.zone_manager, cooked)
-
         self.assertEquals(5, len(hosts))
         just_hosts = [host for host, caps in hosts]
         just_hosts.sort()
@@ -15,6 +15,7 @@
 """
 Tests For Least Cost Scheduler
 """
+import copy

 from nova import test
 from nova.scheduler import least_cost
@@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
         super(LeastCostSchedulerTestCase, self).tearDown()

     def assertWeights(self, expected, num, request_spec, hosts):
-        weighted = self.sched.weigh_hosts(num, request_spec, hosts)
+        weighted = self.sched.weigh_hosts("compute", request_spec, hosts)
         self.assertDictListMatch(weighted, expected, approx_equal=True)

     def test_no_hosts(self):
@@ -125,19 +126,20 @@ class LeastCostSchedulerTestCase(test.TestCase):
         num = 1
         instance_type = {'memory_mb': 1024}
         request_spec = {'instance_type': instance_type}
-        all_hosts = self.sched.zone_manager.service_states.iteritems()
+        svc_states = self.sched.zone_manager.service_states.iteritems()
         all_hosts = [(host, services["compute"])
-                for host, services in all_hosts
+                for host, services in svc_states
                 if "compute" in services]
-        hosts = self.sched.filter_hosts('compute', request_spec, host_list)
+        hosts = self.sched.filter_hosts('compute', request_spec, all_hosts)

         expected = []
-        for idx, (hostname, caps) in enumerate(hosts):
+        for idx, (hostname, services) in enumerate(hosts):
+            caps = copy.deepcopy(services["compute"])
             # Costs are normalized so over 10 hosts, each host with increasing
             # free ram will cost 1/N more. Since the lowest cost host has some
             # free ram, we add in the 1/N for the base_cost
             weight = 0.1 + (0.1 * idx)
-            weight_dict = dict(weight=weight, hostname=hostname)
-            expected.append(weight_dict)
+            wtd_dict = dict(hostname=hostname, weight=weight, capabilities=caps)
+            expected.append(wtd_dict)

         self.assertWeights(expected, num, request_spec, hosts)
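The expected weights in this test follow from the normalization step in the least-cost code above: assuming normalize_list() scales each score by the maximum (the code computes max_ = max(L)) and the fake hosts' host_memory_free grows by 10 per host, a single weight-1 cost function yields 0.1, 0.2, ..., 1.0, which is exactly the 0.1 + (0.1 * idx) the test builds. A quick arithmetic check under those assumptions:

free_mem = [10 + 10 * n for n in range(10)]          # ten fake hosts
max_ = max(free_mem)
normalized = [m / float(max_) for m in free_mem]     # 0.1, 0.2, ..., 1.0
expected = [0.1 + (0.1 * idx) for idx in range(10)]
assert all(abs(a - b) < 1e-9 for a, b in zip(normalized, expected))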
@@ -1,200 +0,0 @@
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Tests For Scheduler Host Filters.
-"""
-
-import json
-
-from nova import exception
-from nova import test
-from nova.scheduler import host_filter
-
-
-class FakeZoneManager:
-    pass
-
-
-class HostFilterTestCase(test.TestCase):
-    """Test case for host filters."""
-
-    def _host_caps(self, multiplier):
-        # Returns host capabilities in the following way:
-        # host1 = memory:free 10 (100max)
-        #         disk:available 100 (1000max)
-        # hostN = memory:free 10 + 10N
-        #         disk:available 100 + 100N
-        # in other words: hostN has more resources than host0
-        # which means ... don't go above 10 hosts.
-        return {'host_name-description': 'XenServer %s' % multiplier,
-                'host_hostname': 'xs-%s' % multiplier,
-                'host_memory_total': 100,
-                'host_memory_overhead': 10,
-                'host_memory_free': 10 + multiplier * 10,
-                'host_memory_free-computed': 10 + multiplier * 10,
-                'host_other-config': {},
-                'host_ip_address': '192.168.1.%d' % (100 + multiplier),
-                'host_cpu_info': {},
-                'disk_available': 100 + multiplier * 100,
-                'disk_total': 1000,
-                'disk_used': 0,
-                'host_uuid': 'xxx-%d' % multiplier,
-                'host_name-label': 'xs-%s' % multiplier}
-
-    def setUp(self):
-        super(HostFilterTestCase, self).setUp()
-        default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter'
-        self.flags(default_host_filter=default_host_filter)
-        self.instance_type = dict(name='tiny',
-                memory_mb=50,
-                vcpus=10,
-                local_gb=500,
-                flavorid=1,
-                swap=500,
-                rxtx_quota=30000,
-                rxtx_cap=200,
-                extra_specs={})
-
-        self.zone_manager = FakeZoneManager()
-        states = {}
-        for x in xrange(10):
-            states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
-        self.zone_manager.service_states = states
-
-    def test_choose_filter(self):
-        # Test default filter ...
-        hf = host_filter.choose_host_filter()
-        self.assertEquals(hf._full_name(),
-                          'nova.scheduler.host_filter.AllHostsFilter')
-        # Test valid filter ...
-        hf = host_filter.choose_host_filter(
-                'nova.scheduler.host_filter.InstanceTypeFilter')
-        self.assertEquals(hf._full_name(),
-                          'nova.scheduler.host_filter.InstanceTypeFilter')
-        # Test invalid filter ...
-        try:
-            host_filter.choose_host_filter('does not exist')
-            self.fail("Should not find host filter.")
-        except exception.SchedulerHostFilterNotFound:
-            pass
-
-    def test_all_host_filter(self):
-        hf = host_filter.AllHostsFilter()
-        cooked = hf.instance_type_to_filter(self.instance_type)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-        self.assertEquals(10, len(hosts))
-        for host, capabilities in hosts:
-            self.assertTrue(host.startswith('host'))
-
-    def test_instance_type_filter(self):
-        hf = host_filter.InstanceTypeFilter()
-        # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
-                          name)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-        self.assertEquals(6, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        self.assertEquals('host05', just_hosts[0])
-        self.assertEquals('host10', just_hosts[5])
-
-    def test_json_filter(self):
-        hf = host_filter.JsonFilter()
-        # filter all hosts that can support 50 ram and 500 disk
-        name, cooked = hf.instance_type_to_filter(self.instance_type)
-        self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-        self.assertEquals(6, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        self.assertEquals('host05', just_hosts[0])
-        self.assertEquals('host10', just_hosts[5])
-
-        # Try some custom queries
-
-        raw = ['or',
-                   ['and',
-                       ['<', '$compute.host_memory_free', 30],
-                       ['<', '$compute.disk_available', 300],
-                   ],
-                   ['and',
-                       ['>', '$compute.host_memory_free', 70],
-                       ['>', '$compute.disk_available', 700],
-                   ],
-              ]
-
-        cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-
-        self.assertEquals(5, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        for index, host in zip([1, 2, 8, 9, 10], just_hosts):
-            self.assertEquals('host%02d' % index, host)
-
-        raw = ['not',
-                  ['=', '$compute.host_memory_free', 30],
-              ]
-        cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-
-        self.assertEquals(9, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
-            self.assertEquals('host%02d' % index, host)
-
-        raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
-        cooked = json.dumps(raw)
-        hosts = hf.filter_hosts(self.zone_manager, cooked)
-
-        self.assertEquals(5, len(hosts))
-        just_hosts = [host for host, caps in hosts]
-        just_hosts.sort()
-        for index, host in zip([2, 4, 6, 8, 10], just_hosts):
-            self.assertEquals('host%02d' % index, host)
-
-        # Try some bogus input ...
-        raw = ['unknown command', ]
-        cooked = json.dumps(raw)
-        try:
-            hf.filter_hosts(self.zone_manager, cooked)
-            self.fail("Should give KeyError")
-        except KeyError, e:
-            pass
-
-        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
-        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
-        self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
-                ['not', True, False, True, False])))
-
-        try:
-            hf.filter_hosts(self.zone_manager, json.dumps(
-                    'not', True, False, True, False))
-            self.fail("Should give KeyError")
-        except KeyError, e:
-            pass
-
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(['=', '$foo', 100])))
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(['=', '$.....', 100])))
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(
-                ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]])))
-
-        self.assertFalse(hf.filter_hosts(self.zone_manager,
-                json.dumps(['=', {}, ['>', '$missing....foo']])))