flatten distributed scheduler

This branch removes AbstractScheduler, BaseScheduler and LeastCostScheduler
and replaces them with DistributedScheduler. The points of extension are now
handled via the --default_host_filters and --least_cost_functions flags only.
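
For example, a deployment picks its extension points with flags like the
following (illustrative values; both flags are defined in this branch):

    --default_host_filters=InstanceTypeFilter
    --least_cost_functions=nova.scheduler.least_cost.compute_fill_first_cost_fn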

Also, greatly simplified the zone handling logic in DistributedScheduler,
mostly by replacing the cryptic dicts with helper classes.

Fixed up the Least Cost functions to better deal with multiple functions.
(In a follow-up patch I will remove the normalization that occurs, as this
will be a problem.)

Tests were mostly rewritten to support this new world order.

Debated removing JSONFilter since it's not accessible from the outside world,
but decided to keep it as there are discussions afoot on making scheduler
changes without having to redeploy code or restart services.

HostFilters once again get all the host service capabilities, but now via a
HostInfo class that mostly contains read-only dicts of capabilities.
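
In practice a filter reads host data roughly like this (a minimal sketch;
my_ram_filter is illustrative and not a filter shipped in this branch):

    def my_ram_filter(hostname, host_info, instance_type):
        caps = host_info.compute or {}  # read-only capability dict
        if not caps.get('enabled', True):
            return False  # host administratively disabled
        return host_info.free_ram_mb >= instance_type['memory_mb']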

Virtual resource consumption is done in the DistributedScheduler class now.
The filters/weighing functions don't need to worry about this. Also, weighing
functions only have to return a single host and not worry about the number of
instances requested.
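
Since a weighing function now scores one host at a time, adding a new one is
small. A minimal sketch, assuming a disk-based analogue of the fill-first
function (the function name and its weight flag are illustrative; it would be
enabled via --least_cost_functions):

    def compute_disk_fill_first_cost_fn(host_info):
        """More free disk = higher cost, so fuller hosts are preferred."""
        return host_info.free_disk_gb

    # get_cost_fns() resolves a matching FLAGS.<function_name>_weight flag:
    flags.DEFINE_float('compute_disk_fill_first_cost_fn_weight', 1.0,
                       'How much weight to give the disk fill-first cost fn')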

Change-Id: I92600a4a9c58b1add775c328a18d8f48c305861e
Author: Sandy Walsh 2011-10-28 08:48:10 -07:00
parent 44d9647e7f
commit d8e1b8f210
19 changed files with 1233 additions and 1292 deletions

View File

@@ -40,6 +40,7 @@
<paul@openstack.org> <pvoccio@castor.local>
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<rlane@wikimedia.org> <laner@controller>
<sandy.walsh@rackspace.com> <sandy@sandywalsh.com>
<sleepsonthefloor@gmail.com> <root@tonbuntu>
<soren.hansen@rackspace.com> <soren@linux2go.dk>
<throughnothing@gmail.com> <will.wolf@rackspace.com>

View File

@@ -158,6 +158,11 @@ def compute_node_get(context, compute_id, session=None):
return IMPL.compute_node_get(context, compute_id)
def compute_node_get_all(context, session=None):
"""Get all computeNodes."""
return IMPL.compute_node_get_all(context)
def compute_node_create(context, values):
"""Create a computeNode from the values dictionary."""
return IMPL.compute_node_create(context, values)

View File

@@ -366,6 +366,16 @@ def compute_node_get(context, compute_id, session=None):
return result
@require_admin_context
def compute_node_get_all(context, session=None):
if not session:
session = get_session()
return session.query(models.ComputeNode).\
options(joinedload('service')).\
filter_by(deleted=can_read_deleted(context))
@require_admin_context
def compute_node_create(context, values):
compute_node_ref = models.ComputeNode()

View File

@@ -1,315 +0,0 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The AbstractScheduler is an abstract Scheduler class for creating instances
locally or across zones. Two methods should be overridden in order to
customize the behavior: filter_hosts() and weigh_hosts(). The default
behavior is to simply select all hosts and weight them the same.
"""
import json
import operator
import M2Crypto
from novaclient import v1_1 as novaclient
from novaclient import exceptions as novaclient_exceptions
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova.compute import api as compute_api
from nova.scheduler import api
from nova.scheduler import driver
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.scheduler.abstract_scheduler')
class InvalidBlob(exception.NovaException):
message = _("Ill-formed or incorrectly routed 'blob' data sent "
"to instance create request.")
class AbstractScheduler(driver.Scheduler):
"""Base class for creating Schedulers that can work across any nova
deployment, from simple designs to multiply-nested zones.
"""
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
return api.call_zone_method(context, method, specs=specs, zones=zones)
def _provision_resource_locally(self, context, build_plan_item,
request_spec, kwargs):
"""Create the requested resource in this Zone."""
host = build_plan_item['hostname']
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, host,
'run_instance', instance_id=instance['id'], **kwargs)
return driver.encode_instance(instance, local=True)
def _decrypt_blob(self, blob):
"""Returns the decrypted blob or None if invalid. Broken out
for testing.
"""
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
try:
json_entry = decryptor(blob)
return json.dumps(json_entry)
except M2Crypto.EVP.EVPError:
pass
return None
def _ask_child_zone_to_create_instance(self, context, zone_info,
request_spec, kwargs):
"""Once we have determined that the request should go to one
of our children, we need to fabricate a new POST /servers/
call with the same parameters that were passed into us.
Note that we have to reverse engineer from our args to get back the
image, flavor, ipgroup, etc. since the original call could have
come in from EC2 (which doesn't use these things).
"""
instance_type = request_spec['instance_type']
instance_properties = request_spec['instance_properties']
name = instance_properties['display_name']
image_ref = instance_properties['image_ref']
meta = instance_properties['metadata']
flavor_id = instance_type['flavorid']
reservation_id = instance_properties['reservation_id']
files = kwargs['injected_files']
child_zone = zone_info['child_zone']
child_blob = zone_info['child_blob']
zone = db.zone_get(context.elevated(), child_zone)
url = zone.api_url
LOG.debug(_("Forwarding instance create call to child zone %(url)s"
". ReservationID=%(reservation_id)s") % locals())
nova = None
try:
nova = novaclient.Client(zone.username, zone.password, None, url,
token=context.auth_token)
nova.authenticate()
except novaclient_exceptions.BadRequest, e:
raise exception.NotAuthorized(_("Bad credentials attempting "
"to talk to zone at %(url)s.") % locals())
# NOTE(Vek): Novaclient has two different calling conventions
# for this call, depending on whether you're using
# 1.0 or 1.1 API: in 1.0, there's an ipgroups
# argument after flavor_id which isn't present in
# 1.1. To work around this, all the extra
# arguments are passed as keyword arguments
# (there's a reasonable default for ipgroups in the
# novaclient call).
instance = nova.servers.create(name, image_ref, flavor_id,
meta=meta, files=files, zone_blob=child_blob,
reservation_id=reservation_id)
return driver.encode_instance(instance._info, local=False)
def _provision_resource_from_blob(self, context, build_plan_item,
request_spec, kwargs):
"""Create the requested resource locally or in a child zone
based on what is stored in the zone blob info.
Attempt to decrypt the blob to see if this request is:
1. valid, and
2. intended for this zone or a child zone.
Note: If we have "blob" that means the request was passed
into us from a parent zone. If we have "child_blob" that
means we gathered the info from one of our children.
It's possible that, when we decrypt the 'blob' field, it
contains "child_blob" data. In which case we forward the
request.
"""
host_info = None
if "blob" in build_plan_item:
# Request was passed in from above. Is it for us?
host_info = self._decrypt_blob(build_plan_item['blob'])
elif "child_blob" in build_plan_item:
# Our immediate child zone provided this info ...
host_info = build_plan_item
if not host_info:
raise InvalidBlob()
# Valid data ... is it for us?
if 'child_zone' in host_info and 'child_blob' in host_info:
instance = self._ask_child_zone_to_create_instance(context,
host_info, request_spec, kwargs)
else:
instance = self._provision_resource_locally(context,
host_info, request_spec, kwargs)
return instance
def _provision_resource(self, context, build_plan_item,
request_spec, kwargs):
"""Create the requested resource in this Zone or a child zone."""
if "hostname" in build_plan_item:
return self._provision_resource_locally(context,
build_plan_item, request_spec, kwargs)
return self._provision_resource_from_blob(context,
build_plan_item, request_spec, kwargs)
def _adjust_child_weights(self, child_results, zones):
"""Apply the Scale and Offset values from the Zone definition
to adjust the weights returned from the child zones. Alters
child_results in place.
"""
for zone_id, result in child_results:
if not result:
continue
for zone_rec in zones:
if zone_rec['id'] != zone_id:
continue
for item in result:
try:
offset = zone_rec['weight_offset']
scale = zone_rec['weight_scale']
raw_weight = item['weight']
cooked_weight = offset + scale * raw_weight
item['weight'] = cooked_weight
item['raw_weight'] = raw_weight
except KeyError:
LOG.exception(_("Bad child zone scaling values "
"for Zone: %(zone_id)s") % locals())
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. However we need to look at the parameters being
passed in to see if this is a request to:
1. Create a Build Plan and then provision, or
2. Use the Build Plan information in the request parameters
to simply create the instance (either in this zone or
a child zone).
returns list of instances created.
"""
# TODO(sandy): We'll have to look for richer specs at some point.
blob = request_spec.get('blob')
if blob:
instance = self._provision_resource(context,
request_spec, request_spec, kwargs)
# Caller expects a list of instances
return [instance]
num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
locals())
# Create build plan and provision ...
build_plan = self.select(context, request_spec)
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
instances = []
for num in xrange(num_instances):
if not build_plan:
break
build_plan_item = build_plan.pop(0)
instance = self._provision_resource(context,
build_plan_item, request_spec, kwargs)
instances.append(instance)
return instances
def select(self, context, request_spec, *args, **kwargs):
"""Select returns a list of weights and zone/host information
corresponding to the best hosts to service the request. Any
child zone information has been encrypted so as not to reveal
anything about the children.
"""
return self._schedule(context, "compute", request_spec,
*args, **kwargs)
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
"""
# TODO(sandy): We're only focused on compute instances right now,
# so we don't implement the default "schedule()" method required
# of Schedulers.
msg = _("No host selection for %s defined." % topic)
raise driver.NoValidHost(msg)
def _schedule(self, context, topic, request_spec, *args, **kwargs):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
if topic != "compute":
msg = _("Scheduler only understands Compute nodes (for now)")
raise NotImplementedError(msg)
# Get all available hosts.
all_hosts = self.zone_manager.service_states.iteritems()
unfiltered_hosts = [(host, services[topic])
for host, services in all_hosts
if topic in services]
# Filter local hosts based on requirements ...
filtered_hosts = self.filter_hosts(topic, request_spec,
unfiltered_hosts)
# weigh the selected hosts.
# weighted_hosts = [{weight=weight, hostname=hostname,
# capabilities=capabs}, ...]
weighted_hosts = self.weigh_hosts(request_spec, filtered_hosts)
# Next, tack on the host weights from the child zones
json_spec = json.dumps(request_spec)
all_zones = db.zone_get_all(context.elevated())
child_results = self._call_zone_method(context, "select",
specs=json_spec, zones=all_zones)
self._adjust_child_weights(child_results, all_zones)
for child_zone, result in child_results:
for weighting in result:
# Remember the child_zone so we can get back to
# it later if needed. This implicitly builds a zone
# path structure.
host_dict = {"weight": weighting["weight"],
"child_zone": child_zone,
"child_blob": weighting["blob"]}
weighted_hosts.append(host_dict)
weighted_hosts.sort(key=operator.itemgetter('weight'))
return weighted_hosts
def filter_hosts(self, topic, request_spec, host_list):
"""Filter the full host list returned from the ZoneManager. By default,
this method only applies the basic_ram_filter(), meaning all hosts
with at least enough RAM for the requested instance are returned.
Override in subclasses to provide greater selectivity.
"""
def basic_ram_filter(hostname, capabilities, request_spec):
"""Only return hosts with sufficient available RAM."""
instance_type = request_spec['instance_type']
requested_mem = instance_type['memory_mb'] * 1024 * 1024
return capabilities['host_memory_free'] >= requested_mem
return [(host, services) for host, services in host_list
if basic_ram_filter(host, services, request_spec)]
def weigh_hosts(self, request_spec, hosts):
"""This version assigns a weight of 1 to all hosts, making selection
of any host basically a random event. Override this method in your
subclass to add logic to prefer one potential host over another.
"""
return [dict(weight=1, hostname=hostname, capabilities=capabilities)
for hostname, capabilities in hosts]

View File

@@ -1,94 +0,0 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The BaseScheduler is the base class Scheduler for creating instances
across zones. There are two expansion points to this class for:
1. Assigning Weights to hosts for requested instances
2. Filtering Hosts based on required instance capabilities
"""
from nova import flags
from nova import log as logging
from nova.scheduler import abstract_scheduler
from nova.scheduler import host_filter
FLAGS = flags.FLAGS
flags.DEFINE_boolean('spread_first', False,
'Use a spread-first zone scheduler strategy')
LOG = logging.getLogger('nova.scheduler.base_scheduler')
class BaseScheduler(abstract_scheduler.AbstractScheduler):
"""Base class for creating Schedulers that can work across any nova
deployment, from simple designs to multiply-nested zones.
"""
def filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list (from the ZoneManager)"""
filters = request_spec.get('filter')
if filters is None:
# Not specified; use the default
filters = FLAGS.default_host_filters
if not isinstance(filters, (list, tuple)):
filters = [filters]
if hosts is None:
# Get the full list (only considering 'compute' services)
all_hosts = self.zone_manager.service_states.iteritems()
hosts = [(host, services["compute"])
for host, services in all_hosts
if "compute" in services]
# Make sure that the requested filters are legitimate.
selected_filters = host_filter.choose_host_filters(filters)
# TODO(sandy): We're only using InstanceType-based specs
# currently. Later we'll need to snoop for more detailed
# host filter requests.
instance_type = request_spec.get("instance_type", None)
if instance_type is None:
# No way to select; return the specified hosts
return hosts
for selected_filter in selected_filters:
query = selected_filter.instance_type_to_filter(instance_type)
hosts = selected_filter.filter_hosts(hosts, query)
return hosts
def weigh_hosts(self, request_spec, hosts):
"""Derived classes may override this to provide more sophisticated
scheduling objectives
"""
# Make sure if there are compute hosts to serve the request.
if not hosts:
return []
# NOTE(sirp): The default logic is the same as the NoopCostFunction
hosts = [dict(weight=1, hostname=hostname, capabilities=capabilities)
for hostname, capabilities in hosts]
# NOTE(Vek): What we actually need to return is enough hosts
# for all the instances!
num_instances = request_spec.get('num_instances', 1)
instances = []
while num_instances > len(hosts):
instances.extend(hosts)
num_instances -= len(hosts)
if num_instances > 0:
instances.extend(hosts[:num_instances])
# Adjust the weights for a spread-first strategy
if FLAGS.spread_first:
for i, host in enumerate(hosts):
host['weight'] = i + 1
return instances

View File

@@ -0,0 +1,363 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The DistributedScheduler is for creating instances locally or across zones.
You can customize this scheduler by specifying your own Host Filters and
Weighing Functions.
"""
import json
import operator
import types
import M2Crypto
from novaclient import v1_1 as novaclient
from novaclient import exceptions as novaclient_exceptions
from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova.compute import api as compute_api
from nova.scheduler import api
from nova.scheduler import driver
from nova.scheduler import filters
from nova.scheduler import least_cost
FLAGS = flags.FLAGS
flags.DEFINE_list('default_host_filters', ['InstanceTypeFilter'],
'Which filters to use for filtering hosts when not specified '
'in the request.')
LOG = logging.getLogger('nova.scheduler.distributed_scheduler')
class InvalidBlob(exception.NovaException):
message = _("Ill-formed or incorrectly routed 'blob' data sent "
"to instance create request.")
class DistributedScheduler(driver.Scheduler):
"""Scheduler that can work across any nova deployment, from simple
deployments to multiple nested zones.
"""
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
NOTE: We're only focused on compute instances right now,
so this method will always raise NoValidHost()."""
msg = _("No host selection for %s defined." % topic)
raise driver.NoValidHost(msg)
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. However we need to look at the parameters being
passed in to see if this is a request to:
1. Create a build plan (a list of WeightedHosts) and then provision, or
2. Use the WeightedHost information in the request parameters
to simply create the instance (either in this zone or
a child zone).
returns a list of the instances created.
"""
elevated = context.elevated()
num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
locals())
weighted_hosts = []
# Having a 'blob' hint means we've already provided a build plan.
# We need to turn this back into a WeightedHost object.
blob = request_spec.get('blob', None)
if blob:
weighted_hosts.append(self._make_weighted_host_from_blob(blob))
else:
# No plan ... better make one.
weighted_hosts = self._schedule(elevated, "compute", request_spec,
*args, **kwargs)
if not weighted_hosts:
raise driver.NoValidHost(_('No hosts were available'))
instances = []
for num in xrange(num_instances):
if not weighted_hosts:
break
weighted_host = weighted_hosts.pop(0)
instance = None
if weighted_host.host:
instance = self._provision_resource_locally(elevated,
weighted_host, request_spec, kwargs)
else:
instance = self._ask_child_zone_to_create_instance(elevated,
weighted_host, request_spec, kwargs)
if instance:
instances.append(instance)
return instances
def select(self, context, request_spec, *args, **kwargs):
"""Select returns a list of weights and zone/host information
corresponding to the best hosts to service the request. Any
internal zone information will be encrypted so as not to reveal
anything about our inner layout.
"""
elevated = context.elevated()
weighted_hosts = self._schedule(elevated, "compute", request_spec,
*args, **kwargs)
return [weighted_host.to_dict() for weighted_host in weighted_hosts]
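# Illustration (hypothetical values): the returned build plan looks like
# [{'weight': 1.5, 'host': 'host1'},
#  {'weight': 2.0, 'zone': 3, 'blob': '<encrypted child data>'}]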
def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
return api.call_zone_method(context, method, specs=specs, zones=zones)
def _provision_resource_locally(self, context, weighted_host, request_spec,
kwargs):
"""Create the requested resource in this Zone."""
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, weighted_host.host,
'run_instance', instance_id=instance['id'], **kwargs)
return driver.encode_instance(instance, local=True)
def _make_weighted_host_from_blob(self, blob):
"""Returns the decrypted blob as a WeightedHost object
or None if invalid. Broken out for testing.
"""
decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key)
try:
json_entry = decryptor(blob)
# Extract our WeightedHost values
wh_dict = json.loads(json_entry)
host = wh_dict.get('host', None)
blob = wh_dict.get('blob', None)
zone = wh_dict.get('zone', None)
return least_cost.WeightedHost(wh_dict['weight'],
host=host, blob=blob, zone=zone)
except M2Crypto.EVP.EVPError:
raise InvalidBlob()
def _ask_child_zone_to_create_instance(self, context, weighted_host,
request_spec, kwargs):
"""Once we have determined that the request should go to one
of our children, we need to fabricate a new POST /servers/
call with the same parameters that were passed into us.
This request is always for a single instance.
Note that we have to reverse engineer from our args to get back the
image, flavor, ipgroup, etc. since the original call could have
come in from EC2 (which doesn't use these things).
"""
instance_type = request_spec['instance_type']
instance_properties = request_spec['instance_properties']
name = instance_properties['display_name']
image_ref = instance_properties['image_ref']
meta = instance_properties['metadata']
flavor_id = instance_type['flavorid']
reservation_id = instance_properties['reservation_id']
files = kwargs['injected_files']
zone = db.zone_get(context.elevated(), weighted_host.zone)
zone_name = zone.name
url = zone.api_url
LOG.debug(_("Forwarding instance create call to zone '%(zone_name)s'. "
"ReservationID=%(reservation_id)s") % locals())
nova = None
try:
# This operation is done as the caller, not the zone admin.
nova = novaclient.Client(zone.username, zone.password, None, url,
token=context.auth_token,
region_name=zone_name)
nova.authenticate()
except novaclient_exceptions.BadRequest, e:
raise exception.NotAuthorized(_("Bad credentials attempting "
"to talk to zone at %(url)s.") % locals())
# NOTE(Vek): Novaclient has two different calling conventions
# for this call, depending on whether you're using
# 1.0 or 1.1 API: in 1.0, there's an ipgroups
# argument after flavor_id which isn't present in
# 1.1. To work around this, all the extra
# arguments are passed as keyword arguments
# (there's a reasonable default for ipgroups in the
# novaclient call).
instance = nova.servers.create(name, image_ref, flavor_id,
meta=meta, files=files,
zone_blob=weighted_host.blob,
reservation_id=reservation_id)
return driver.encode_instance(instance._info, local=False)
def _adjust_child_weights(self, child_results, zones):
"""Apply the Scale and Offset values from the Zone definition
to adjust the weights returned from the child zones. Returns
a list of WeightedHost objects: [WeightedHost(), ...]
"""
weighted_hosts = []
for zone_id, result in child_results:
if not result:
continue
for zone_rec in zones:
if zone_rec['id'] != zone_id:
continue
for item in result:
try:
offset = zone_rec['weight_offset']
scale = zone_rec['weight_scale']
raw_weight = item['weight']
cooked_weight = offset + scale * raw_weight
weighted_hosts.append(least_cost.WeightedHost(
host=None, weight=cooked_weight,
zone=zone_id, blob=item['blob']))
except KeyError:
LOG.exception(_("Bad child zone scaling values "
"for Zone: %(zone_id)s") % locals())
return weighted_hosts
def _zone_get_all(self, context):
"""Broken out for testing."""
return db.zone_get_all(context)
def _schedule(self, elevated, topic, request_spec, *args, **kwargs):
"""Returns a list of hosts that meet the required specs,
ordered by their fitness.
"""
if topic != "compute":
msg = _("Scheduler only understands Compute nodes (for now)")
raise NotImplementedError(msg)
instance_type = request_spec.get("instance_type", None)
if not instance_type:
msg = _("Scheduler only understands InstanceType-based" \
"provisioning.")
raise NotImplementedError(msg)
ram_requirement_mb = instance_type['memory_mb']
disk_requirement_gb = instance_type['local_gb']
# Find our local list of acceptable hosts by repeatedly
# filtering and weighing our options. Each time we choose a
# host, we virtually consume resources on it so subsequent
# selections can adjust accordingly.
# unfiltered_hosts_dict is {host : ZoneManager.HostInfo()}
unfiltered_hosts_dict = self.zone_manager.get_all_host_data(elevated)
unfiltered_hosts = unfiltered_hosts_dict.items()
num_instances = request_spec.get('num_instances', 1)
selected_hosts = []
for num in xrange(num_instances):
# Filter local hosts based on requirements ...
filtered_hosts = self._filter_hosts(topic, request_spec,
unfiltered_hosts)
if not filtered_hosts:
# Can't get any more locally.
break
LOG.debug(_("Filtered %(filtered_hosts)s") % locals())
# weighted_host = WeightedHost() ... the best
# host for the job.
weighted_host = least_cost.weigh_hosts(request_spec,
filtered_hosts)
LOG.debug(_("Weighted %(weighted_host)s") % locals())
selected_hosts.append(weighted_host)
# Now consume the resources so the filter/weights
# will change for the next instance.
weighted_host.hostinfo.consume_resources(disk_requirement_gb,
ram_requirement_mb)
# Next, tack on the host weights from the child zones
json_spec = json.dumps(request_spec)
all_zones = self._zone_get_all(elevated)
child_results = self._call_zone_method(elevated, "select",
specs=json_spec, zones=all_zones)
selected_hosts.extend(self._adjust_child_weights(
child_results, all_zones))
selected_hosts.sort(key=operator.attrgetter('weight'))
return selected_hosts[:num_instances]
def _get_filter_classes(self):
# Imported here to avoid circular imports
from nova.scheduler import filters
def get_itm(nm):
return getattr(filters, nm)
return [get_itm(itm) for itm in dir(filters)
if (type(get_itm(itm)) is types.TypeType)
and issubclass(get_itm(itm), filters.AbstractHostFilter)
and get_itm(itm) is not filters.AbstractHostFilter]
def _choose_host_filters(self, filters=None):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if not filters:
filters = FLAGS.default_host_filters
if not isinstance(filters, (list, tuple)):
filters = [filters]
good_filters = []
bad_filters = []
filter_classes = self._get_filter_classes()
for filter_name in filters:
found_class = False
for cls in filter_classes:
if cls.__name__ == filter_name:
good_filters.append(cls())
found_class = True
break
if not found_class:
bad_filters.append(filter_name)
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def _filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list. hosts = [(host, HostInfo()), ...].
This method returns a subset of hosts, in the same format."""
selected_filters = self._choose_host_filters()
# TODO(sandy): We're only using InstanceType-based specs
# currently. Later we'll need to snoop for more detailed
# host filter requests.
instance_type = request_spec.get("instance_type", None)
if instance_type is None:
# No way to select; return the specified hosts.
return hosts
for selected_filter in selected_filters:
query = selected_filter.instance_type_to_filter(instance_type)
hosts = selected_filter.filter_hosts(hosts, query)
return hosts

View File

@@ -13,11 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import nova.scheduler
from nova.scheduler.filters import abstract_filter
LOG = logging.getLogger('nova.scheduler.filter.instance_type_filter')
class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
"""HostFilter hard-coded to work with InstanceType records."""
def instance_type_to_filter(self, instance_type):
@@ -29,6 +33,7 @@ class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
satisfy the extra specs associated with the instance type"""
if 'extra_specs' not in instance_type:
return True
# NOTE(lorinh): For now, we are just checking exact matching on the
# values. Later on, we want to handle numerical
# values so we can represent things like number of GPU cards
@@ -36,58 +41,31 @@ class InstanceTypeFilter(abstract_filter.AbstractHostFilter):
for key, value in instance_type['extra_specs'].iteritems():
if capabilities[key] != value:
return False
except KeyError:
except KeyError, e:
return False
return True
def _basic_ram_filter(self, host_name, host_info, instance_type):
"""Only return hosts with sufficient available RAM."""
requested_ram = instance_type['memory_mb']
free_ram_mb = host_info.free_ram_mb
return free_ram_mb >= requested_ram
def filter_hosts(self, host_list, query):
"""Return a list of hosts that can create instance_type."""
instance_type = query
selected_hosts = []
for host, capabilities in host_list:
# In case the capabilities have not yet been extracted from
# the zone manager's services dict...
capabilities = capabilities.get("compute", capabilities)
if not capabilities:
for hostname, host_info in host_list:
if not self._basic_ram_filter(hostname, host_info,
instance_type):
continue
if not capabilities.get("enabled", True):
# Host is disabled
continue
host_ram_mb = capabilities['host_memory_free']
disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
extra_specs = instance_type['extra_specs']
capabilities = host_info.compute
if capabilities:
if not capabilities.get("enabled", True):
continue
if not self._satisfies_extra_specs(capabilities,
instance_type):
continue
if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and
self._satisfies_extra_specs(capabilities, instance_type)):
selected_hosts.append((host, capabilities))
selected_hosts.append((hostname, host_info))
return selected_hosts
# host entries (currently) are like:
# {'host_name-description': 'Default install of XenServer',
# 'host_hostname': 'xs-mini',
# 'host_memory_total': 8244539392,
# 'host_memory_overhead': 184225792,
# 'host_memory_free': 3868327936,
# 'host_memory_free_computed': 3840843776,
# 'host_other_config': {},
# 'host_ip_address': '192.168.1.109',
# 'host_cpu_info': {},
# 'enabled': True,
# 'disk_available': 32954957824,
# 'disk_total': 50394562560,
# 'disk_used': 17439604736,
# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
# 'host_name_label': 'xs-mini'}
# instance_type table has:
# name = Column(String(255), unique=True)
# memory_mb = Column(Integer)
# vcpus = Column(Integer)
# local_gb = Column(Integer)
# flavorid = Column(Integer, unique=True)
# swap = Column(Integer, nullable=False, default=0)
# rxtx_quota = Column(Integer, nullable=False, default=0)
# rxtx_cap = Column(Integer, nullable=False, default=0)

View File

@@ -96,7 +96,7 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
['>=', '$compute.disk_available', required_disk]]
return json.dumps(query)
def _parse_string(self, string, host, services):
def _parse_string(self, string, host, hostinfo):
"""Strings prefixed with $ are capability lookups in the
form '$service.capability[.subcap*]'.
"""
@@ -106,13 +106,18 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
return string
path = string[1:].split(".")
for item in path:
services = services.get(item, None)
if not services:
services = dict(compute=hostinfo.compute, network=hostinfo.network,
volume=hostinfo.volume)
service = services.get(path[0], None)
if not service:
return None
for item in path[1:]:
service = service.get(item, None)
if not service:
return None
return services
return service
def _process_filter(self, query, host, services):
def _process_filter(self, query, host, hostinfo):
"""Recursively parse the query structure."""
if not query:
return True
@@ -121,9 +126,9 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
cooked_args = []
for arg in query[1:]:
if isinstance(arg, list):
arg = self._process_filter(arg, host, services)
arg = self._process_filter(arg, host, hostinfo)
elif isinstance(arg, basestring):
arg = self._parse_string(arg, host, services)
arg = self._parse_string(arg, host, hostinfo)
if arg is not None:
cooked_args.append(arg)
result = method(self, cooked_args)
@@ -135,16 +140,16 @@ class JsonFilter(abstract_filter.AbstractHostFilter):
"""
expanded = json.loads(query)
filtered_hosts = []
for host, capabilities in host_list:
if not capabilities:
for host, hostinfo in host_list:
if not hostinfo:
continue
if not capabilities.get("enabled", True):
if hostinfo.compute and not hostinfo.compute.get("enabled", True):
# Host is disabled
continue
result = self._process_filter(expanded, host, capabilities)
result = self._process_filter(expanded, host, hostinfo)
if isinstance(result, list):
# If any succeeded, include the host
result = any(result)
if result:
filtered_hosts.append((host, capabilities))
filtered_hosts.append((host, hostinfo))
return filtered_hosts

View File

@@ -1,82 +0,0 @@
# Copyright (c) 2011 Openstack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Host Filter classes are a way to ensure that only hosts that are
appropriate are considered when creating a new instance. Hosts that are
either incompatible or insufficient to accept a newly-requested instance
are removed by Host Filter classes from consideration. Those that pass
the filter are then passed on for weighting or other process for ordering.
Filters are in the 'filters' directory that is off the 'scheduler'
directory of nova. Additional filters can be created and added to that
directory; be sure to add them to the filters/__init__.py file so that
they are part of the nova.schedulers.filters namespace.
"""
import types
from nova import exception
from nova import flags
import nova.scheduler
from nova.scheduler import filters
FLAGS = flags.FLAGS
flags.DEFINE_list('default_host_filters', ['AllHostsFilter'],
'Which filters to use for filtering hosts when not specified '
'in the request.')
def _get_filter_classes():
# Imported here to avoid circular imports
from nova.scheduler import filters
def get_itm(nm):
return getattr(filters, nm)
return [get_itm(itm) for itm in dir(filters)
if (type(get_itm(itm)) is types.TypeType)
and issubclass(get_itm(itm), filters.AbstractHostFilter)
and get_itm(itm) is not filters.AbstractHostFilter]
def choose_host_filters(filters=None):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if not filters:
filters = FLAGS.default_host_filters
if not isinstance(filters, (list, tuple)):
filters = [filters]
good_filters = []
bad_filters = []
filter_classes = _get_filter_classes()
for filter_name in filters:
found_class = False
for cls in filter_classes:
if cls.__name__ == filter_name:
good_filters.append(cls())
found_class = True
break
if not found_class:
bad_filters.append(filter_name)
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters

View File

@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
"""
Least Cost Scheduler is a mechanism for choosing which host machines to
provision a set of resources to. The input of the least-cost-scheduler is a
set of objective-functions, called the 'cost-functions', a weight for each
cost-function, and a list of candidate hosts (gathered via FilterHosts).
Least Cost is an algorithm for choosing which host machines to
provision a set of resources to. The input is a WeightedHost object which
is decided upon by a set of objective-functions, called the 'cost-functions'.
The WeightedHost contains a combined weight for each cost-function.
The cost-function and weights are tabulated, and the host with the least cost
is then selected for provisioning.
@@ -27,147 +27,170 @@ import collections
from nova import flags
from nova import log as logging
from nova.scheduler import base_scheduler
from nova import utils
from nova import exception
LOG = logging.getLogger('nova.scheduler.least_cost')
FLAGS = flags.FLAGS
flags.DEFINE_list('least_cost_scheduler_cost_functions',
['nova.scheduler.least_cost.noop_cost_fn'],
flags.DEFINE_list('least_cost_functions',
['nova.scheduler.least_cost.compute_fill_first_cost_fn'],
'Which cost functions the LeastCostScheduler should use.')
# TODO(sirp): Once we have enough of these rules, we can break them out into a
# cost_functions.py file (perhaps in a least_cost_scheduler directory)
flags.DEFINE_integer('noop_cost_fn_weight', 1,
flags.DEFINE_float('noop_cost_fn_weight', 1.0,
'How much weight to give the noop cost function')
flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
flags.DEFINE_float('compute_fill_first_cost_fn_weight', 1.0,
'How much weight to give the fill-first cost function')
def noop_cost_fn(host):
COST_FUNCTION_CACHE = {}
class WeightedHost(object):
"""Reduced set of information about a host that has been weighed.
This is an attempt to remove some of the ad-hoc dict structures
previously used."""
def __init__(self, weight, host=None, blob=None, zone=None, hostinfo=None):
self.weight = weight
self.blob = blob
self.host = host
self.zone = zone
# Local members. These are not returned outside of the Zone.
self.hostinfo = hostinfo
def to_dict(self):
x = dict(weight=self.weight)
if self.blob:
x['blob'] = self.blob
if self.host:
x['host'] = self.host
if self.zone:
x['zone'] = self.zone
return x
def noop_cost_fn(host_info):
"""Return a pre-weight cost of 1 for each host"""
return 1
def compute_fill_first_cost_fn(host):
"""Prefer hosts that have less ram available, filter_hosts will exclude
hosts that don't have enough ram.
"""
hostname, service = host
caps = service.get("compute", {})
free_mem = caps.get("host_memory_free", 0)
return free_mem
def compute_fill_first_cost_fn(host_info):
"""More free ram = higher weight. So servers will less free
ram will be preferred."""
return host_info.free_ram_mb
def normalize_list(L):
"""Normalize an array of numbers such that each element satisfies:
0 <= e <= 1
"""
if not L:
return L
max_ = max(L)
if max_ > 0:
return [(float(e) / max_) for e in L]
return L
def normalize_grid(grid):
"""Normalize a grid of numbers by row."""
if not grid:
return [[]]
normalized = []
for row in grid:
if not row:
normalized.append([])
continue
mx = float(max(row))
if abs(mx) < 0.001:
normalized.append([0.0] * len(row))
continue
normalized.append([float(col) / mx for col in row])
return normalized
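# Quick illustration (hypothetical input): each row is scaled by its own
# maximum, so normalize_grid([[1, 2, 4], [10, 20, 40]]) returns
# [[0.25, 0.5, 1.0], [0.25, 0.5, 1.0]].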
def weighted_sum(domain, weighted_fns, normalize=True):
def weighted_sum(host_list, weighted_fns):
"""Use the weighted-sum method to compute a score for an array of objects.
Normalize the results of the objective-functions so that the weights are
meaningful regardless of objective-function's range.
domain - input to be scored
host_list - [(host, HostInfo()), ...]
weighted_fns - list of weights and functions like:
[(weight, objective-functions)]
[(weight, objective-functions), ...]
Returns an unsorted list of scores. To pair with hosts do:
zip(scores, hosts)
Returns a single WeightedHost object which represents the best
candidate.
"""
# Table of form:
# { domain1: [score1, score2, ..., scoreM]
# ...
# domainN: [score1, score2, ..., scoreM] }
score_table = collections.defaultdict(list)
# Make a grid of function results.
# One row per host. One column per function.
scores = []
for weight, fn in weighted_fns:
scores = [fn(elem) for elem in domain]
if normalize:
norm_scores = normalize_list(scores)
scores.append([fn(host_info) for hostname, host_info in host_list])
scores = normalize_grid(scores)
# Adjust the weights in the grid by each function's weight adjustment
# and sum them up to get a final list of weights.
adjusted_scores = []
for (weight, fn), row in zip(weighted_fns, scores):
adjusted_scores.append([weight * score for score in row])
# Now, sum down the columns to get the final score. Column per host.
final_scores = [0.0] * len(host_list)
for row in adjusted_scores:
for idx, col in enumerate(row):
final_scores[idx] += col
# Super-impose the hostinfo into the scores so
# we don't lose it when we sort.
final_scores = [(final_scores[idx], host_tuple)
for idx, host_tuple in enumerate(host_list)]
final_scores = sorted(final_scores)
weight, (host, hostinfo) = final_scores[0] # Lowest score is the winner!
return WeightedHost(weight, host=host, hostinfo=hostinfo)
def get_cost_fns(topic=None):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
global COST_FUNCTION_CACHE
cost_function_cache = COST_FUNCTION_CACHE
if topic is None:
# Schedulers only support compute right now.
topic = "compute"
if topic in cost_function_cache:
return cost_function_cache[topic]
cost_fns = []
for cost_fn_str in FLAGS.least_cost_functions:
if '.' in cost_fn_str:
short_name = cost_fn_str.split('.')[-1]
else:
short_name = cost_fn_str
cost_fn_str = "%s.%s" % (__name__, short_name)
if not (short_name.startswith('%s_' % topic) or
short_name.startswith('noop')):
continue
try:
# NOTE(sirp): import_class is somewhat misnamed since it can
# import any callable from a module
cost_fn = utils.import_class(cost_fn_str)
except exception.ClassNotFound:
raise exception.SchedulerCostFunctionNotFound(
cost_fn_str=cost_fn_str)
try:
flag_name = "%s_weight" % cost_fn.__name__
weight = getattr(FLAGS, flag_name)
except AttributeError:
raise exception.SchedulerWeightFlagNotFound(
flag_name=flag_name)
cost_fns.append((weight, cost_fn))
cost_function_cache[topic] = cost_fns
return cost_fns
class LeastCostScheduler(base_scheduler.BaseScheduler):
def __init__(self, *args, **kwargs):
self.cost_fns_cache = {}
super(LeastCostScheduler, self).__init__(*args, **kwargs)
def get_cost_fns(self, topic=None):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
if topic is None:
# Schedulers only support compute right now.
topic = "compute"
if topic in self.cost_fns_cache:
return self.cost_fns_cache[topic]
cost_fns = []
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
if '.' in cost_fn_str:
short_name = cost_fn_str.split('.')[-1]
else:
short_name = cost_fn_str
cost_fn_str = "%s.%s.%s" % (
__name__, self.__class__.__name__, short_name)
if not (short_name.startswith('%s_' % topic) or
short_name.startswith('noop')):
continue
try:
# NOTE(sirp): import_class is somewhat misnamed since it can
# import any callable from a module
cost_fn = utils.import_class(cost_fn_str)
except exception.ClassNotFound:
raise exception.SchedulerCostFunctionNotFound(
cost_fn_str=cost_fn_str)
try:
flag_name = "%s_weight" % cost_fn.__name__
weight = getattr(FLAGS, flag_name)
except AttributeError:
raise exception.SchedulerWeightFlagNotFound(
flag_name=flag_name)
cost_fns.append((weight, cost_fn))
self.cost_fns_cache[topic] = cost_fns
return cost_fns
def weigh_hosts(self, request_spec, hosts):
"""Returns a list of dictionaries of form:
[ {weight: weight, hostname: hostname, capabilities: capabs} ]
"""
cost_fns = self.get_cost_fns()
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
weighted = []
weight_log = []
for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
weight_dict = dict(weight=cost, hostname=hostname,
capabilities=caps)
weighted.append(weight_dict)
LOG.debug(_("Weighted Costs => %s") % weight_log)
return weighted
def weigh_hosts(request_spec, host_list):
"""Returns the best host as a WeightedHost."""
cost_fns = get_cost_fns()
return weighted_sum(host_list, cost_fns)
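# Usage sketch (hypothetical hosts): with the default fill-first function,
# weigh_hosts(request_spec, [('host1', hi1), ('host2', hi2)]) returns the
# single WeightedHost whose host passed the filters with the least free RAM.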

View File

@@ -20,6 +20,7 @@ ZoneManager oversees all communications with child Zones.
import datetime
import thread
import traceback
import UserDict
from novaclient import v1_1 as novaclient
@@ -111,6 +112,71 @@ def _poll_zone(zone):
zone.log_error(traceback.format_exc())
class ReadOnlyDict(UserDict.IterableUserDict):
"""A read-only dict."""
def __init__(self, source=None):
self.update(source)
def __setitem__(self, key, item):
raise TypeError
def __delitem__(self, key):
raise TypeError
def clear(self):
raise TypeError
def pop(self, key, *args):
raise TypeError
def popitem(self):
raise TypeError
def update(self, source=None):
if source is None:
return
elif isinstance(source, UserDict.UserDict):
self.data = source.data
elif isinstance(source, type({})):
self.data = source
else:
raise TypeError
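# Illustration: any mutation attempt raises TypeError, e.g.
#   caps = ReadOnlyDict({'enabled': True})
#   caps['enabled'] = False  # TypeError
#   caps.pop('enabled')      # TypeError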
class HostInfo(object):
"""Mutable and immutable information on hosts tracked
by the ZoneManager. This is an attempt to remove the
ad-hoc data structures previously used and lock down
access."""
def __init__(self, host, caps=None, free_ram_mb=0, free_disk_gb=0):
self.host = host
# Read-only capability dicts
self.compute = None
self.volume = None
self.network = None
if caps:
self.compute = ReadOnlyDict(caps.get('compute', None))
self.volume = ReadOnlyDict(caps.get('volume', None))
self.network = ReadOnlyDict(caps.get('network', None))
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.free_ram_mb = free_ram_mb
self.free_disk_gb = free_disk_gb
def consume_resources(self, disk_gb, ram_mb):
"""Consume some of the mutable resources."""
self.free_disk_gb -= disk_gb
self.free_ram_mb -= ram_mb
def __repr__(self):
return "%s ram:%s disk:%s" % \
(self.host, self.free_ram_mb, self.free_disk_gb)
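# Worked example (hypothetical numbers): a host reporting 8192 MB RAM and
# 100 GB disk that schedules one 2048 MB / 20 GB instance:
#   hi = HostInfo('host1', free_ram_mb=8192, free_disk_gb=100)
#   hi.consume_resources(20, 2048)  # disk_gb first, then ram_mb
#   # hi.free_ram_mb == 6144, hi.free_disk_gb == 80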
class ZoneManager(object):
"""Keeps the zone states updated."""
def __init__(self):
@@ -135,6 +201,53 @@ class ZoneManager(object):
ret.append({"service": svc, "host_name": host})
return ret
def _compute_node_get_all(self, context):
"""Broken out for testing."""
return db.compute_node_get_all(context)
def _instance_get_all(self, context):
"""Broken out for testing."""
return db.instance_get_all(context)
def get_all_host_data(self, context):
"""Returns a dict of all the hosts the ZoneManager
knows about. Also, each of the consumable resources in HostInfo
is pre-populated and adjusted based on data in the db.
For example:
{'192.168.1.100': HostInfo(), ...}
Note: this can be very slow with a lot of instances.
InstanceType table isn't required since a copy is stored
with the instance (in case the InstanceType changed since the
instance was created)."""
# Make a compute node dict with the bare essential metrics.
compute_nodes = self._compute_node_get_all(context)
host_info_map = {}
for compute in compute_nodes:
all_disk = compute['local_gb']
all_ram = compute['memory_mb']
host = compute['service']['host']
caps = self.service_states.get(host, None)
host_info_map[host] = HostInfo(host, caps=caps,
free_disk_gb=all_disk,
free_ram_mb=all_ram)
# "Consume" resources from the host the instance resides on.
instances = self._instance_get_all(context)
for instance in instances:
host = instance['host']
host_info = host_info_map.get(host, None)
if not host_info:
continue
disk = instance['local_gb']
ram = instance['memory_mb']
host_info.consume_resources(disk, ram)
return host_info_map
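# Worked example using the fake data further below: host1 reports 1024 MB
# RAM / 1024 GB disk and hosts two 512 MB / 512 GB instances, so its
# HostInfo ends up with free_ram_mb=0 and free_disk_gb=0.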
def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into

View File

@@ -0,0 +1,72 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Distributed Scheduler tests.
"""
from nova.scheduler import distributed_scheduler
from nova.scheduler import zone_manager
class FakeDistributedScheduler(distributed_scheduler.DistributedScheduler):
# No need to stub anything at the moment
pass
class FakeZoneManager(zone_manager.ZoneManager):
"""host1: free_ram_mb=1024-512-512=0, free_disk_gb=1024-512-512=0
host2: free_ram_mb=2048-512=1536 free_disk_gb=2048-512=1536
host3: free_ram_mb=4096-1024=3072 free_disk_gb=4096-1024=3072
host4: free_ram_mb=8192 free_disk_gb=8192"""
def __init__(self):
self.service_states = {
'host1': {
'compute': {'host_memory_free': 1073741824},
},
'host2': {
'compute': {'host_memory_free': 2147483648},
},
'host3': {
'compute': {'host_memory_free': 3221225472},
},
'host4': {
'compute': {'host_memory_free': 999999999},
},
}
def get_host_list_from_db(self, context):
return [
('host1', dict(free_disk_gb=1024, free_ram_mb=1024)),
('host2', dict(free_disk_gb=2048, free_ram_mb=2048)),
('host3', dict(free_disk_gb=4096, free_ram_mb=4096)),
('host4', dict(free_disk_gb=8192, free_ram_mb=8192)),
]
def _compute_node_get_all(self, context):
return [
dict(local_gb=1024, memory_mb=1024, service=dict(host='host1')),
dict(local_gb=2048, memory_mb=2048, service=dict(host='host2')),
dict(local_gb=4096, memory_mb=4096, service=dict(host='host3')),
dict(local_gb=8192, memory_mb=8192, service=dict(host='host4')),
]
def _instance_get_all(self, context):
return [
dict(local_gb=512, memory_mb=512, host='host1'),
dict(local_gb=512, memory_mb=512, host='host1'),
dict(local_gb=512, memory_mb=512, host='host2'),
dict(local_gb=1024, memory_mb=1024, host='host3'),
]

View File

@@ -1,462 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Abstract Scheduler.
"""
import json
import nova.db
from nova import context
from nova import exception
from nova import rpc
from nova import test
from nova.compute import api as compute_api
from nova.scheduler import driver
from nova.scheduler import abstract_scheduler
from nova.scheduler import base_scheduler
from nova.scheduler import zone_manager
def _host_caps(multiplier):
# Returns host capabilities in the following way:
# host1 = memory:free 10 (100max)
# disk:available 100 (1000max)
# hostN = memory:free 10 + 10N
# disk:available 100 + 100N
# in other words: hostN has more resources than host0
# which means ... don't go above 10 hosts.
return {'host_name-description': 'XenServer %s' % multiplier,
'host_hostname': 'xs-%s' % multiplier,
'host_memory_total': 100,
'host_memory_overhead': 10,
'host_memory_free': 10 + multiplier * 10,
'host_memory_free-computed': 10 + multiplier * 10,
'host_other-config': {},
'host_ip_address': '192.168.1.%d' % (100 + multiplier),
'host_cpu_info': {},
'disk_available': 100 + multiplier * 100,
'disk_total': 1000,
'disk_used': 0,
'host_uuid': 'xxx-%d' % multiplier,
'host_name-label': 'xs-%s' % multiplier}
def fake_zone_manager_service_states(num_hosts):
states = {}
for x in xrange(num_hosts):
states['host%02d' % (x + 1)] = {'compute': _host_caps(x)}
return states
class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler):
# No need to stub anything at the moment
pass
class FakeBaseScheduler(base_scheduler.BaseScheduler):
# No need to stub anything at the moment
pass
class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {
'host1': {
'compute': {'host_memory_free': 1073741824},
},
'host2': {
'compute': {'host_memory_free': 2147483648},
},
'host3': {
'compute': {'host_memory_free': 3221225472},
},
'host4': {
'compute': {'host_memory_free': 999999999},
},
}
class FakeEmptyZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {}
def fake_empty_call_zone_method(context, method, specs, zones):
return []
# Hmm, I should probably be using mox for this.
was_called = False
def fake_provision_resource(context, item, request_spec, kwargs):
global was_called
was_called = True
def fake_ask_child_zone_to_create_instance(context, zone_info,
request_spec, kwargs):
global was_called
was_called = True
def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
global was_called
was_called = True
def fake_provision_resource_from_blob(context, item, request_spec, kwargs):
global was_called
was_called = True
def fake_decrypt_blob_returns_local_info(blob):
return {'hostname': 'foooooo'} # values aren't important.
def fake_decrypt_blob_returns_child_info(blob):
return {'child_zone': True,
'child_blob': True} # values aren't important. Keys are.
def fake_call_zone_method(context, method, specs, zones):
return [
(1, [
dict(weight=1, blob='AAAAAAA'),
dict(weight=111, blob='BBBBBBB'),
dict(weight=112, blob='CCCCCCC'),
dict(weight=113, blob='DDDDDDD'),
]),
(2, [
dict(weight=120, blob='EEEEEEE'),
dict(weight=2, blob='FFFFFFF'),
dict(weight=122, blob='GGGGGGG'),
dict(weight=123, blob='HHHHHHH'),
]),
(3, [
dict(weight=130, blob='IIIIIII'),
dict(weight=131, blob='JJJJJJJ'),
dict(weight=132, blob='KKKKKKK'),
dict(weight=3, blob='LLLLLLL'),
]),
]
def fake_zone_get_all(context):
return [
dict(id=1, api_url='zone1',
username='admin', password='password',
weight_offset=0.0, weight_scale=1.0),
dict(id=2, api_url='zone2',
username='admin', password='password',
weight_offset=1000.0, weight_scale=1.0),
dict(id=3, api_url='zone3',
username='admin', password='password',
weight_offset=0.0, weight_scale=1000.0),
]
class AbstractSchedulerTestCase(test.TestCase):
"""Test case for Abstract Scheduler."""
def test_abstract_scheduler(self):
"""
Create a nested set of FakeZones, try to build multiple instances
and ensure that a select call returns the appropriate build plan.
"""
sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeZoneManager()
sched.set_zone_manager(zm)
fake_context = context.RequestContext('user', 'project')
build_plan = sched.select(fake_context,
{'instance_type': {'memory_mb': 512},
'num_instances': 4})
# 4 from local zones, 12 from remotes
self.assertEqual(16, len(build_plan))
hostnames = [plan_item['hostname']
for plan_item in build_plan if 'hostname' in plan_item]
# 4 local hosts
self.assertEqual(4, len(hostnames))
def test_adjust_child_weights(self):
"""Make sure the weights returned by child zones are
properly adjusted based on the scale/offset in the zone
db entries.
"""
sched = FakeAbstractScheduler()
child_results = fake_call_zone_method(None, None, None, None)
zones = fake_zone_get_all(None)
sched._adjust_child_weights(child_results, zones)
scaled = [130000, 131000, 132000, 3000]
for zone, results in child_results:
for item in results:
w = item['weight']
if zone == 'zone1': # No change
self.assertTrue(w < 1000.0)
if zone == 'zone2': # Offset +1000
self.assertTrue(w >= 1000.0 and w < 2000)
if zone == 'zone3': # Scale x1000
self.assertEqual(scaled.pop(0), w)
def test_empty_abstract_scheduler(self):
"""
Ensure empty hosts & child_zones result in a NoValidHost exception.
"""
sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeEmptyZoneManager()
sched.set_zone_manager(zm)
fake_context = context.RequestContext('user', 'project')
request_spec = {}
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec,
dict(host_filter=None, instance_type={}))
def test_schedule_do_not_schedule_with_hint(self):
"""
Check the local/child zone routing in the run_instance() call.
If the zone_blob hint was passed in, don't re-schedule.
"""
global was_called
sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource', fake_provision_resource)
request_spec = {
'instance_properties': {},
'instance_type': {},
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
'blob': "Non-None blob data",
}
instances = sched.schedule_run_instance(None, request_spec)
self.assertTrue(instances)
self.assertTrue(was_called)
def test_provision_resource_local(self):
"""Provision a resource locally or remotely."""
global was_called
sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_locally',
fake_provision_resource_locally)
request_spec = {'hostname': "foo"}
sched._provision_resource(None, request_spec, request_spec, {})
self.assertTrue(was_called)
def test_provision_resource_remote(self):
"""Provision a resource locally or remotely."""
global was_called
sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_provision_resource_from_blob',
fake_provision_resource_from_blob)
request_spec = {}
sched._provision_resource(None, request_spec, request_spec, {})
self.assertTrue(was_called)
def test_provision_resource_from_blob_empty(self):
"""Provision a resource locally or remotely given no hints."""
global was_called
sched = FakeAbstractScheduler()
request_spec = {}
self.assertRaises(abstract_scheduler.InvalidBlob,
sched._provision_resource_from_blob,
None, {}, {}, {})
def test_provision_resource_from_blob_with_local_blob(self):
"""
Provision a resource locally or remotely when blob hint passed in.
"""
global was_called
sched = FakeAbstractScheduler()
was_called = False
def fake_create_db_entry_for_new_instance(self, context,
image, base_options, security_group,
block_device_mapping, num=1):
global was_called
was_called = True
# return a fake instance record
return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'}
def fake_cast_to_compute_host(*args, **kwargs):
pass
self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_local_info)
self.stubs.Set(driver, 'cast_to_compute_host',
fake_cast_to_compute_host)
self.stubs.Set(compute_api.API,
'create_db_entry_for_new_instance',
fake_create_db_entry_for_new_instance)
build_plan_item = {'blob': "Non-None blob data"}
request_spec = {'image': {}, 'instance_properties': {}}
sched._provision_resource_from_blob(None, build_plan_item,
request_spec, {})
self.assertTrue(was_called)
def test_provision_resource_from_blob_with_child_blob(self):
"""
Provision a resource locally or remotely when child blob hint
passed in.
"""
global was_called
sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_child_info)
was_called = False
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
fake_ask_child_zone_to_create_instance)
request_spec = {'blob': "Non-None blob data"}
sched._provision_resource_from_blob(None, request_spec,
request_spec, {})
self.assertTrue(was_called)
def test_provision_resource_from_blob_with_immediate_child_blob(self):
"""
Provision a resource locally or remotely when blob hint passed in
from an immediate child.
"""
global was_called
sched = FakeAbstractScheduler()
was_called = False
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
fake_ask_child_zone_to_create_instance)
request_spec = {'child_blob': True, 'child_zone': True}
sched._provision_resource_from_blob(None, request_spec,
request_spec, {})
self.assertTrue(was_called)
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
fixture = FakeAbstractScheduler()
test_data = {"foo": "bar"}
class StubDecryptor(object):
def decryptor(self, key):
return lambda blob: blob
self.stubs.Set(abstract_scheduler, 'crypto',
StubDecryptor())
self.assertEqual(fixture._decrypt_blob(test_data),
json.dumps(test_data))
def test_empty_local_hosts(self):
"""
Create a nested set of FakeZones, try to build multiple instances
and ensure that a select call returns the appropriate build plan.
"""
sched = FakeAbstractScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeZoneManager()
# patch this to have no local hosts
zm.service_states = {}
sched.set_zone_manager(zm)
fake_context = context.RequestContext('user', 'project')
build_plan = sched.select(fake_context,
{'instance_type': {'memory_mb': 512},
'num_instances': 4})
# 0 from local zones, 12 from remotes
self.assertEqual(12, len(build_plan))
def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing
a non-admin context. DB actions should work."""
sched = FakeAbstractScheduler()
def fake_cast_to_compute_host(*args, **kwargs):
pass
def fake_zone_get_all_zero(context):
# make sure this is called with admin context, even though
# we're using user context below
self.assertTrue(context.is_admin)
return []
self.stubs.Set(driver, 'cast_to_compute_host',
fake_cast_to_compute_host)
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all_zero)
zm = FakeZoneManager()
sched.set_zone_manager(zm)
fake_context = context.RequestContext('user', 'project')
request_spec = {
'image': {'properties': {}},
'security_group': [],
'instance_properties': {
'project_id': fake_context.project_id,
'user_id': fake_context.user_id},
'instance_type': {'memory_mb': 256},
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter'
}
instances = sched.schedule_run_instance(fake_context, request_spec)
self.assertEqual(len(instances), 1)
self.assertFalse(instances[0].get('_is_precooked', False))
nova.db.instance_destroy(fake_context, instances[0]['id'])
class BaseSchedulerTestCase(test.TestCase):
"""Test case for Base Scheduler."""
def test_weigh_hosts(self):
"""
Try to weigh a short list of hosts and make sure enough
entries for a larger number instances are returned.
"""
sched = FakeBaseScheduler()
# Fake out a list of hosts
zm = FakeZoneManager()
hostlist = [(host, services['compute'])
for host, services in zm.service_states.items()
if 'compute' in services]
# Call weigh_hosts()
num_instances = len(hostlist) * 2 + len(hostlist) / 2
instlist = sched.weigh_hosts(dict(num_instances=num_instances),
hostlist)
# Should be enough entries to cover all instances
self.assertEqual(len(instlist), num_instances)

View File

@@ -0,0 +1,262 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Distributed Scheduler.
"""
import json
import nova.db
from nova import context
from nova import exception
from nova import rpc
from nova import test
from nova.compute import api as compute_api
from nova.scheduler import driver
from nova.scheduler import distributed_scheduler
from nova.scheduler import least_cost
from nova.scheduler import zone_manager
from nova.tests.scheduler import fake_zone_manager as ds_fakes
class FakeEmptyZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {}
def get_host_list_from_db(self, context):
return []
def _compute_node_get_all(*args, **kwargs):
return []
def _instance_get_all(*args, **kwargs):
return []
def fake_call_zone_method(context, method, specs, zones):
return [
(1, [
dict(weight=2, blob='AAAAAAA'),
dict(weight=4, blob='BBBBBBB'),
dict(weight=6, blob='CCCCCCC'),
dict(weight=8, blob='DDDDDDD'),
]),
(2, [
dict(weight=10, blob='EEEEEEE'),
dict(weight=12, blob='FFFFFFF'),
dict(weight=14, blob='GGGGGGG'),
dict(weight=16, blob='HHHHHHH'),
]),
(3, [
dict(weight=18, blob='IIIIIII'),
dict(weight=20, blob='JJJJJJJ'),
dict(weight=22, blob='KKKKKKK'),
dict(weight=24, blob='LLLLLLL'),
]),
]
def fake_zone_get_all(context):
return [
dict(id=1, api_url='zone1',
username='admin', password='password',
weight_offset=0.0, weight_scale=1.0),
dict(id=2, api_url='zone2',
username='admin', password='password',
weight_offset=1000.0, weight_scale=1.0),
dict(id=3, api_url='zone3',
username='admin', password='password',
weight_offset=0.0, weight_scale=1000.0),
]
class DistributedSchedulerTestCase(test.TestCase):
"""Test case for Distributed Scheduler."""
def test_adjust_child_weights(self):
"""Make sure the weights returned by child zones are
properly adjusted based on the scale/offset in the zone
db entries.
"""
sched = ds_fakes.FakeDistributedScheduler()
child_results = fake_call_zone_method(None, None, None, None)
zones = fake_zone_get_all(None)
weighted_hosts = sched._adjust_child_weights(child_results, zones)
scaled = [18000, 20000, 22000, 24000]  # zone3 weights, each scaled x1000
for weighted_host in weighted_hosts:
w = weighted_host.weight
if weighted_host.zone == 'zone1': # No change
self.assertTrue(w < 1000.0)
if weighted_host.zone == 'zone2': # Offset +1000
self.assertTrue(w >= 1000.0 and w < 2000)
if weighted_host.zone == 'zone3': # Scale x1000
self.assertEqual(scaled.pop(0), w)
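# The adjustment this test assumes is a simple linear map,
# new_weight = weight_offset + weight_scale * weight, driven by the
# zone rows from fake_zone_get_all(). Worked through the fake data:
#   zone1: offset 0,    scale 1    -> 2, 4, 6, 8 (unchanged, all < 1000)
#   zone2: offset 1000, scale 1    -> 1010, 1012, 1014, 1016
#   zone3: offset 0,    scale 1000 -> 18000, 20000, 22000, 24000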
def test_run_instance_no_hosts(self):
"""
Ensure empty hosts & child_zones result in a NoValidHost exception.
"""
def _fake_empty_call_zone_method(*args, **kwargs):
return []
sched = ds_fakes.FakeDistributedScheduler()
sched.zone_manager = FakeEmptyZoneManager()
self.stubs.Set(sched, '_call_zone_method',
_fake_empty_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
fake_context = context.RequestContext('user', 'project')
request_spec = dict(instance_type=dict(memory_mb=1, local_gb=1))
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, request_spec)
def test_run_instance_with_blob_hint(self):
"""
Check the local/child zone routing in the run_instance() call.
If the zone_blob hint was passed in, don't re-schedule.
"""
self.schedule_called = False
self.from_blob_called = False
self.locally_called = False
self.child_zone_called = False
def _fake_schedule(*args, **kwargs):
self.schedule_called = True
return least_cost.WeightedHost(1, host='x')
def _fake_make_weighted_host_from_blob(*args, **kwargs):
self.from_blob_called = True
return least_cost.WeightedHost(1, zone='x', blob='y')
def _fake_provision_resource_locally(*args, **kwargs):
self.locally_called = True
return 1
def _fake_ask_child_zone_to_create_instance(*args, **kwargs):
self.child_zone_called = True
return 2
sched = ds_fakes.FakeDistributedScheduler()
self.stubs.Set(sched, '_schedule', _fake_schedule)
self.stubs.Set(sched, '_make_weighted_host_from_blob',
_fake_make_weighted_host_from_blob)
self.stubs.Set(sched, '_provision_resource_locally',
_fake_provision_resource_locally)
self.stubs.Set(sched, '_ask_child_zone_to_create_instance',
_fake_ask_child_zone_to_create_instance)
request_spec = {
'instance_properties': {},
'instance_type': {},
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter',
'blob': "Non-None blob data",
}
fake_context = context.RequestContext('user', 'project')
instances = sched.schedule_run_instance(fake_context, request_spec)
self.assertTrue(instances)
self.assertFalse(self.schedule_called)
self.assertTrue(self.from_blob_called)
self.assertTrue(self.child_zone_called)
self.assertFalse(self.locally_called)
self.assertEquals(instances, [2])
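# What the assertions above pin down: a 'blob' hint bypasses
# _schedule() entirely, the blob is decrypted and rebuilt into a
# WeightedHost, and because that WeightedHost names a child zone the
# request is forwarded via _ask_child_zone_to_create_instance rather
# than being provisioned locally (hence child_zone_called, not
# locally_called).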
def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing
a non-admin context. DB actions should work."""
self.was_admin = False
def fake_schedule(context, *args, **kwargs):
# make sure this is called with admin context, even though
# we're using user context below
self.was_admin = context.is_admin
return []
sched = ds_fakes.FakeDistributedScheduler()
self.stubs.Set(sched, '_schedule', fake_schedule)
fake_context = context.RequestContext('user', 'project')
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, {})
self.assertTrue(self.was_admin)
def test_schedule_bad_topic(self):
"""Parameter checking."""
sched = ds_fakes.FakeDistributedScheduler()
self.assertRaises(NotImplementedError, sched._schedule, None, "foo",
{})
def test_schedule_no_instance_type(self):
"""Parameter checking."""
sched = ds_fakes.FakeDistributedScheduler()
self.assertRaises(NotImplementedError, sched._schedule, None,
"compute", {})
def test_schedule_happy_day(self):
"""_schedule() has no branching logic beyond basic input parameter
checking. Just make sure there's nothing glaringly wrong by doing
a happy day pass through."""
self.next_weight = 1.0
def _fake_filter_hosts(topic, request_info, unfiltered_hosts):
return unfiltered_hosts
def _fake_weigh_hosts(request_info, hosts):
self.next_weight += 2.0
host, hostinfo = hosts[0]
return least_cost.WeightedHost(self.next_weight, host=host,
hostinfo=hostinfo)
sched = ds_fakes.FakeDistributedScheduler()
fake_context = context.RequestContext('user', 'project')
sched.zone_manager = ds_fakes.FakeZoneManager()
self.stubs.Set(sched, '_filter_hosts', _fake_filter_hosts)
self.stubs.Set(least_cost, 'weigh_hosts', _fake_weigh_hosts)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
instance_type = dict(memory_mb=512, local_gb=512)
request_spec = dict(num_instances=10, instance_type=instance_type)
weighted_hosts = sched._schedule(fake_context, 'compute',
request_spec)
self.assertEquals(len(weighted_hosts), 10)
for weighted_host in weighted_hosts:
# We set this up so remote hosts have even weights ...
if int(weighted_host.weight) % 2 == 0:
self.assertTrue(weighted_host.zone is not None)
self.assertTrue(weighted_host.host is None)
else:
self.assertTrue(weighted_host.host is not None)
self.assertTrue(weighted_host.zone is None)
def test_decrypt_blob(self):
"""Test that the decrypt method works."""
fixture = ds_fakes.FakeDistributedScheduler()
test_data = {'weight': 1, 'host': 'x', 'blob': 'y', 'zone': 'z'}
class StubDecryptor(object):
def decryptor(self, key):
return lambda blob: blob
self.stubs.Set(distributed_scheduler, 'crypto', StubDecryptor())
weighted_host = fixture._make_weighted_host_from_blob(
json.dumps(test_data))
self.assertTrue(isinstance(weighted_host, least_cost.WeightedHost))
self.assertEqual(weighted_host.to_dict(), dict(weight=1, host='x',
blob='y', zone='z'))
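For reference, a minimal sketch of the WeightedHost result object these tests keep inspecting, and of the blob round trip that test_decrypt_blob exercises. This is illustrative only: the real class lives in nova.scheduler.least_cost, the rebuild logic in DistributedScheduler._make_weighted_host_from_blob, and the decrypt hook shown as a plain callable is an assumption standing in for the crypto module stubbed above.

import json


class WeightedHost(object):
    # One scheduling answer: a weight plus either a local host or a
    # (zone, blob) pair pointing into a child zone.
    def __init__(self, weight, host=None, zone=None, blob=None,
                 hostinfo=None):
        self.weight = weight
        self.host = host
        self.zone = zone
        self.blob = blob
        self.hostinfo = hostinfo

    def to_dict(self):
        return dict(weight=self.weight, host=self.host,
                    blob=self.blob, zone=self.zone)


def make_weighted_host_from_blob(blob, decrypt=lambda b: b):
    # Decrypt, parse the JSON payload, rebuild a WeightedHost --
    # the round trip test_decrypt_blob checks with a no-op decryptor.
    entry = json.loads(decrypt(blob))
    return WeightedHost(entry['weight'], host=entry.get('host'),
                        zone=entry.get('zone'), blob=entry.get('blob'))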

View File

@@ -21,11 +21,9 @@ import json
import nova
from nova import exception
from nova import test
-from nova.scheduler import host_filter
-class FakeZoneManager:
-pass
+from nova.scheduler import distributed_scheduler as dist
+from nova.scheduler import zone_manager
+from nova.tests.scheduler import fake_zone_manager as ds_fakes
class HostFilterTestCase(test.TestCase):
@@ -60,18 +58,18 @@
default_host_filters = ['AllHostsFilter']
self.flags(default_host_filters=default_host_filters)
self.instance_type = dict(name='tiny',
-memory_mb=50,
+memory_mb=30,
vcpus=10,
-local_gb=500,
+local_gb=300,
flavorid=1,
swap=500,
rxtx_quota=30000,
rxtx_cap=200,
extra_specs={})
self.gpu_instance_type = dict(name='tiny.gpu',
-memory_mb=50,
+memory_mb=30,
vcpus=10,
-local_gb=500,
+local_gb=300,
flavorid=2,
swap=500,
rxtx_quota=30000,
@@ -79,86 +77,89 @@
extra_specs={'xpu_arch': 'fermi',
'xpu_info': 'Tesla 2050'})
-self.zone_manager = FakeZoneManager()
+self.zone_manager = ds_fakes.FakeZoneManager()
states = {}
-for x in xrange(10):
-states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
+for x in xrange(4):
+states['host%d' % (x + 1)] = {'compute': self._host_caps(x)}
self.zone_manager.service_states = states
# Add some extra capabilities to some hosts
-host07 = self.zone_manager.service_states['host07']['compute']
-host07['xpu_arch'] = 'fermi'
-host07['xpu_info'] = 'Tesla 2050'
+host4 = self.zone_manager.service_states['host4']['compute']
+host4['xpu_arch'] = 'fermi'
+host4['xpu_info'] = 'Tesla 2050'
-host08 = self.zone_manager.service_states['host08']['compute']
-host08['xpu_arch'] = 'radeon'
+host2 = self.zone_manager.service_states['host2']['compute']
+host2['xpu_arch'] = 'radeon'
-host09 = self.zone_manager.service_states['host09']['compute']
-host09['xpu_arch'] = 'fermi'
-host09['xpu_info'] = 'Tesla 2150'
+host3 = self.zone_manager.service_states['host3']['compute']
+host3['xpu_arch'] = 'fermi'
+host3['xpu_info'] = 'Tesla 2150'
def _get_all_hosts(self):
-return self.zone_manager.service_states.items()
+return self.zone_manager.get_all_host_data(None).items()
def test_choose_filter(self):
# Test default filter ...
-hfs = host_filter.choose_host_filters()
+sched = dist.DistributedScheduler()
+hfs = sched._choose_host_filters()
hf = hfs[0]
self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter')
# Test valid filter ...
-hfs = host_filter.choose_host_filters('InstanceTypeFilter')
+hfs = sched._choose_host_filters('InstanceTypeFilter')
hf = hfs[0]
self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter')
# Test invalid filter ...
try:
-host_filter.choose_host_filters('does not exist')
+sched._choose_host_filters('does not exist')
self.fail("Should not find host filter.")
except exception.SchedulerHostFilterNotFound:
pass
def test_all_host_filter(self):
-hfs = host_filter.choose_host_filters('AllHostsFilter')
+sched = dist.DistributedScheduler()
+hfs = sched._choose_host_filters('AllHostsFilter')
hf = hfs[0]
all_hosts = self._get_all_hosts()
cooked = hf.instance_type_to_filter(self.instance_type)
hosts = hf.filter_hosts(all_hosts, cooked)
-self.assertEquals(10, len(hosts))
+self.assertEquals(4, len(hosts))
for host, capabilities in hosts:
self.assertTrue(host.startswith('host'))
def test_instance_type_filter(self):
hf = nova.scheduler.filters.InstanceTypeFilter()
-# filter all hosts that can support 50 ram and 500 disk
+# filter all hosts that can support 30 ram and 300 disk
cooked = hf.instance_type_to_filter(self.instance_type)
all_hosts = self._get_all_hosts()
hosts = hf.filter_hosts(all_hosts, cooked)
-self.assertEquals(6, len(hosts))
-just_hosts = [host for host, caps in hosts]
+self.assertEquals(3, len(hosts))
+just_hosts = [host for host, hostinfo in hosts]
just_hosts.sort()
-self.assertEquals('host05', just_hosts[0])
-self.assertEquals('host10', just_hosts[5])
+self.assertEquals('host4', just_hosts[2])
+self.assertEquals('host3', just_hosts[1])
+self.assertEquals('host2', just_hosts[0])
def test_instance_type_filter_extra_specs(self):
hf = nova.scheduler.filters.InstanceTypeFilter()
-# filter all hosts that can support 50 ram and 500 disk
+# filter all hosts that can support 30 ram and 300 disk
cooked = hf.instance_type_to_filter(self.gpu_instance_type)
all_hosts = self._get_all_hosts()
hosts = hf.filter_hosts(all_hosts, cooked)
self.assertEquals(1, len(hosts))
just_hosts = [host for host, caps in hosts]
-self.assertEquals('host07', just_hosts[0])
+self.assertEquals('host4', just_hosts[0])
def test_json_filter(self):
hf = nova.scheduler.filters.JsonFilter()
-# filter all hosts that can support 50 ram and 500 disk
+# filter all hosts that can support 30 ram and 300 disk
cooked = hf.instance_type_to_filter(self.instance_type)
all_hosts = self._get_all_hosts()
hosts = hf.filter_hosts(all_hosts, cooked)
-self.assertEquals(6, len(hosts))
+self.assertEquals(2, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
-self.assertEquals('host05', just_hosts[0])
-self.assertEquals('host10', just_hosts[5])
+self.assertEquals('host3', just_hosts[0])
+self.assertEquals('host4', just_hosts[1])
# Try some custom queries
@@ -168,18 +169,18 @@
['<', '$compute.disk_available', 300],
],
['and',
-['>', '$compute.host_memory_free', 70],
-['>', '$compute.disk_available', 700],
+['>', '$compute.host_memory_free', 30],
+['>', '$compute.disk_available', 300],
]
]
cooked = json.dumps(raw)
hosts = hf.filter_hosts(all_hosts, cooked)
-self.assertEquals(5, len(hosts))
+self.assertEquals(3, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
-for index, host in zip([1, 2, 8, 9, 10], just_hosts):
-self.assertEquals('host%02d' % index, host)
+for index, host in zip([1, 2, 4], just_hosts):
+self.assertEquals('host%d' % index, host)
@@ -187,20 +188,20 @@
cooked = json.dumps(raw)
hosts = hf.filter_hosts(all_hosts, cooked)
-self.assertEquals(9, len(hosts))
+self.assertEquals(3, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
-for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
-self.assertEquals('host%02d' % index, host)
+for index, host in zip([1, 2, 4], just_hosts):
+self.assertEquals('host%d' % index, host)
raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100]
cooked = json.dumps(raw)
hosts = hf.filter_hosts(all_hosts, cooked)
-self.assertEquals(5, len(hosts))
+self.assertEquals(2, len(hosts))
just_hosts = [host for host, caps in hosts]
just_hosts.sort()
-for index, host in zip([2, 4, 6, 8, 10], just_hosts):
-self.assertEquals('host%02d' % index, host)
+for index, host in zip([2, 4], just_hosts):
+self.assertEquals('host%d' % index, host)
# Try some bogus input ...
raw = ['unknown command', ]

View File

@@ -0,0 +1,109 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Least Cost functions.
"""
from nova.scheduler import least_cost
from nova.scheduler import zone_manager
from nova import test
from nova.tests.scheduler import fake_zone_manager
def offset(hostinfo):
return hostinfo.free_ram_mb + 10000
def scale(hostinfo):
return hostinfo.free_ram_mb * 2
class LeastCostTestCase(test.TestCase):
def setUp(self):
super(LeastCostTestCase, self).setUp()
self.zone_manager = fake_zone_manager.FakeZoneManager()
def tearDown(self):
super(LeastCostTestCase, self).tearDown()
def test_normalize_grid(self):
raw = [
[1, 2, 3, 4, 5],
[10, 20, 30, 40, 50],
[100, 200, 300, 400, 500],
]
expected = [
[.2, .4, .6, .8, 1.0],
[.2, .4, .6, .8, 1.0],
[.2, .4, .6, .8, 1.0],
]
self.assertEquals(expected, least_cost.normalize_grid(raw))
self.assertEquals([[]], least_cost.normalize_grid([]))
self.assertEquals([[]], least_cost.normalize_grid([[]]))
def test_weighted_sum_happy_day(self):
fn_tuples = [(1.0, offset), (1.0, scale)]
hostinfo_list = self.zone_manager.get_all_host_data(None).items()
# host1: free_ram_mb=0
# host2: free_ram_mb=1536
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# [offset, scale]=
# [10000, 11536, 13072, 18192]
# [0, 3072, 6144, 16384]
# normalized =
# [ 0.55, 0.63, 0.72, 1.0]
# [ 0.0, 0.19, 0.38, 1.0]
# adjusted [ 1.0 * x + 1.0 * y] =
# [0.55, 0.82, 1.1, 2.0]
# so, host1 should win:
weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples)
self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01)
self.assertEqual(weighted_host.host, 'host1')
def test_weighted_sum_single_function(self):
fn_tuples = [(1.0, offset), ]
hostinfo_list = self.zone_manager.get_all_host_data(None).items()
# host1: free_ram_mb=0
# host2: free_ram_mb=1536
# host3: free_ram_mb=3072
# host4: free_ram_mb=8192
# [offset, ]=
# [10000, 11536, 13072, 18192]
# normalized =
# [ 0.55, 0.63, 0.72, 1.0]
# so, host1 should win:
weighted_host = least_cost.weighted_sum(hostinfo_list, fn_tuples)
self.assertTrue(abs(weighted_host.weight - 0.55) < 0.01)
self.assertEqual(weighted_host.host, 'host1')
def test_get_cost_functions(self):
fns = least_cost.get_cost_fns()
self.assertEquals(len(fns), 1)
weight, fn = fns[0]
self.assertEquals(weight, 1.0)
hostinfo = zone_manager.HostInfo('host', free_ram_mb=1000)
self.assertEquals(1000, fn(hostinfo))
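The expectations above pin down the shape of both helpers well enough to sketch them. The code below is a hand derivation, not the shipped implementation: it assumes each row of cost scores is normalized against that row's own maximum and that the lowest weighted total wins, which is what the worked numbers in the test comments imply.

from nova.scheduler import least_cost


def normalize_grid(grid):
    # Scale each row against its own maximum so every score lands in
    # 0..1; degenerate input collapses to [[]], as the test expects.
    if not grid or not any(grid):
        return [[]]
    return [[score / float(max(row)) for score in row] for row in grid]


def weighted_sum(hostinfo_list, weighted_fns):
    # Score every (host, hostinfo) pair with every cost function,
    # normalize per function, then return the host with the lowest
    # weighted total as a WeightedHost.
    grid = [[fn(hostinfo) for _host, hostinfo in hostinfo_list]
            for _weight, fn in weighted_fns]
    normalized = normalize_grid(grid)
    weights = [weight for weight, _fn in weighted_fns]
    totals = [sum(w * row[i] for w, row in zip(weights, normalized))
              for i in range(len(hostinfo_list))]
    best = totals.index(min(totals))
    host, hostinfo = hostinfo_list[best]
    return least_cost.WeightedHost(totals[best], host=host,
                                   hostinfo=hostinfo)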

View File

@@ -1,116 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Least Cost Scheduler
"""
import copy
from nova import test
from nova.scheduler import least_cost
from nova.tests.scheduler import test_abstract_scheduler
MB = 1024 * 1024
class FakeHost(object):
def __init__(self, host_id, free_ram, io):
self.id = host_id
self.free_ram = free_ram
self.io = io
class WeightedSumTestCase(test.TestCase):
def test_empty_domain(self):
domain = []
weighted_fns = []
result = least_cost.weighted_sum(domain, weighted_fns)
expected = []
self.assertEqual(expected, result)
def test_basic_costing(self):
hosts = [
FakeHost(1, 512 * MB, 100),
FakeHost(2, 256 * MB, 400),
FakeHost(3, 512 * MB, 100),
]
weighted_fns = [
(1, lambda h: h.free_ram), # Fill-first, free_ram is a *cost*
(2, lambda h: h.io), # Avoid high I/O
]
costs = least_cost.weighted_sum(
domain=hosts, weighted_fns=weighted_fns)
# Each 256 MB unit of free-ram contributes 0.5 points by way of:
# cost = weight * (score/max_score) = 1 * (256/512) = 0.5
# Each 100 iops of IO adds 0.5 points by way of:
# cost = 2 * (100/400) = 2 * 0.25 = 0.5
expected = [1.5, 2.5, 1.5]
self.assertEqual(expected, costs)
class LeastCostSchedulerTestCase(test.TestCase):
def setUp(self):
super(LeastCostSchedulerTestCase, self).setUp()
class FakeZoneManager:
pass
zone_manager = FakeZoneManager()
states = test_abstract_scheduler.fake_zone_manager_service_states(
num_hosts=10)
zone_manager.service_states = states
self.sched = least_cost.LeastCostScheduler()
self.sched.zone_manager = zone_manager
def tearDown(self):
super(LeastCostSchedulerTestCase, self).tearDown()
def assertWeights(self, expected, num, request_spec, hosts):
weighted = self.sched.weigh_hosts(request_spec, hosts)
self.assertDictListMatch(weighted, expected, approx_equal=True)
def test_no_hosts(self):
num = 1
request_spec = {}
hosts = []
expected = []
self.assertWeights(expected, num, request_spec, hosts)
def test_noop_cost_fn(self):
self.flags(least_cost_scheduler_cost_functions=[
'nova.scheduler.least_cost.noop_cost_fn'],
noop_cost_fn_weight=1)
num = 1
request_spec = {}
hosts = self.sched.filter_hosts(num, request_spec)
expected = [{"hostname": hostname, "weight": 1, "capabilities": caps}
for hostname, caps in hosts]
self.assertWeights(expected, num, request_spec, hosts)
def test_cost_fn_weights(self):
self.flags(least_cost_scheduler_cost_functions=[
'nova.scheduler.least_cost.noop_cost_fn'],
noop_cost_fn_weight=2)
num = 1
request_spec = {}
hosts = self.sched.filter_hosts(num, request_spec)
expected = [{"hostname": hostname, "weight": 2, "capabilities": caps}
for hostname, caps in hosts]
self.assertWeights(expected, num, request_spec, hosts)

View File

@@ -25,6 +25,7 @@ semantics of real hypervisor connections.
"""
from nova import db
+from nova import exception
from nova import log as logging
from nova import utils
@@ -238,8 +239,36 @@
pass
def update_available_resource(self, ctxt, host):
-"""This method is supported only by libvirt."""
-return
+"""Updates compute manager resource info on ComputeNode table.
+Since we don't have a real hypervisor, pretend we have lots of
+disk and ram.
+"""
+try:
+service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
+except exception.NotFound:
+raise exception.ComputeServiceUnavailable(host=host)
+# Updating host information
+dic = {'vcpus': 1,
+'memory_mb': 4096,
+'local_gb': 1028,
+'vcpus_used': 0,
+'memory_mb_used': 0,
+'local_gb_used': 0,
+'hypervisor_type': 'fake',
+'hypervisor_version': '1.0',
+'cpu_info': '?'}
+compute_node_ref = service_ref['compute_node']
+if not compute_node_ref:
+LOG.info(_('Compute_service record created for %s ') % host)
+dic['service_id'] = service_ref['id']
+db.compute_node_create(ctxt, dic)
+else:
+LOG.info(_('Compute_service record updated for %s ') % host)
+db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
def compare_cpu(self, xml):
"""This method is supported only by libvirt."""

View File

@@ -354,8 +354,47 @@
'password': FLAGS.xenapi_connection_password}
def update_available_resource(self, ctxt, host):
-"""This method is supported only by libvirt."""
-return
+"""Updates compute manager resource info on ComputeNode table.
+This method is called when nova-compute launches, and
+whenever admin executes "nova-manage service update_resource".
+:param ctxt: security context
+:param host: hostname that compute manager is currently running
+"""
+try:
+service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
+except exception.NotFound:
+raise exception.ComputeServiceUnavailable(host=host)
+host_stats = self.get_host_stats(refresh=True)
+# Updating host information
+total_ram_mb = host_stats['host_memory_total'] / (1024 * 1024)
+free_ram_mb = host_stats['host_memory_free'] / (1024 * 1024)
+total_disk_gb = host_stats['disk_total'] / (1024 * 1024 * 1024)
+used_disk_gb = host_stats['disk_used'] / (1024 * 1024 * 1024)
+dic = {'vcpus': 0,
+'memory_mb': total_ram_mb,
+'local_gb': total_disk_gb,
+'vcpus_used': 0,
+'memory_mb_used': total_ram_mb - free_ram_mb,
+'local_gb_used': used_disk_gb,
+'hypervisor_type': 'xen',
+'hypervisor_version': 0,
+'cpu_info': host_stats['host_cpu_info']['cpu_count']}
+compute_node_ref = service_ref['compute_node']
+if not compute_node_ref:
+LOG.info(_('Compute_service record created for %s ') % host)
+dic['service_id'] = service_ref['id']
+db.compute_node_create(ctxt, dic)
+else:
+LOG.info(_('Compute_service record updated for %s ') % host)
+db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
def compare_cpu(self, xml):
"""This method is supported only by libvirt."""