trunk merge
@@ -45,23 +45,20 @@ def get_pagination_params(request):
     exc.HTTPBadRequest() exceptions to be raised.

     """
-    try:
-        marker = int(request.GET.get('marker', 0))
-    except ValueError:
-        raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
-    try:
-        limit = int(request.GET.get('limit', 0))
-    except ValueError:
-        raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
-    if limit < 0:
-        raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
-    if marker < 0:
-        raise webob.exc.HTTPBadRequest(_('marker param must be positive'))
-    return(marker, limit)
+    params = {}
+    for param in ['marker', 'limit']:
+        if not param in request.GET:
+            continue
+        try:
+            params[param] = int(request.GET[param])
+        except ValueError:
+            msg = _('%s param must be an integer') % param
+            raise webob.exc.HTTPBadRequest(msg)
+        if params[param] < 0:
+            msg = _('%s param must be positive') % param
+            raise webob.exc.HTTPBadRequest(msg)
+
+    return params


 def limited(items, request, max_limit=FLAGS.osapi_max_limit):
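Note: get_pagination_params() now returns a dict instead of a (marker, limit)
tuple, and it omits any parameter the client did not send. A minimal usage
sketch (mirroring the PaginationParamsTest changes further down):

    req = webob.Request.blank('/?limit=20&marker=40')
    params = get_pagination_params(req)
    # params == {'marker': 40, 'limit': 20}
    # Request.blank('/') would yield {} -- absent params are simply omitted.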
@@ -100,10 +97,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):


 def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
     """Return a slice of items according to the requested marker and limit."""
-    (marker, limit) = get_pagination_params(request)
-
-    if limit == 0:
-        limit = max_limit
+    params = get_pagination_params(request)
+
+    limit = params.get('limit', max_limit)
+    marker = params.get('marker')

     limit = min(max_limit, limit)
     start_index = 0
@@ -114,6 +114,15 @@ class CreateInstanceHelper(object):
         name = name.strip()

         reservation_id = body['server'].get('reservation_id')
+        min_count = body['server'].get('min_count')
+        max_count = body['server'].get('max_count')
+        # min_count and max_count are optional.  If they exist, they come
+        # in as strings.  We want to default 'min_count' to 1, and default
+        # 'max_count' to be 'min_count'.
+        min_count = int(min_count) if min_count else 1
+        max_count = int(max_count) if max_count else min_count
+        if min_count > max_count:
+            min_count = max_count

         try:
             inst_type = \
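Note: the min_count/max_count handling added above only normalizes optional
request strings to integers. A standalone sketch of the same defaulting rules
(normalize_counts is a hypothetical helper, not part of this commit):

    def normalize_counts(min_count, max_count):
        # Values arrive from the request body as strings, or None if absent.
        min_count = int(min_count) if min_count else 1
        max_count = int(max_count) if max_count else min_count
        if min_count > max_count:
            min_count = max_count
        return min_count, max_count

    # normalize_counts(None, None) == (1, 1)
    # normalize_counts('3', None)  == (3, 3)
    # normalize_counts('5', '2')   == (2, 2)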
@@ -137,7 +146,9 @@ class CreateInstanceHelper(object):
                     injected_files=injected_files,
                     admin_password=password,
                     zone_blob=zone_blob,
-                    reservation_id=reservation_id))
+                    reservation_id=reservation_id,
+                    min_count=min_count,
+                    max_count=max_count))
         except quota.QuotaError as error:
             self._handle_quota_error(error)
         except exception.ImageNotFound as error:
@@ -217,9 +217,9 @@ class ControllerV11(Controller):
         """
         context = req.environ['nova.context']
         filters = self._get_filters(req)
-        (marker, limit) = common.get_pagination_params(req)
-        images = self._image_service.index(
-            context, filters=filters, marker=marker, limit=limit)
+        page_params = common.get_pagination_params(req)
+        images = self._image_service.index(context, filters=filters,
+                                           **page_params)
         builder = self.get_builder(req).build
         return dict(images=[builder(image, detail=False) for image in images])

@@ -231,9 +231,9 @@ class ControllerV11(Controller):
         """
         context = req.environ['nova.context']
         filters = self._get_filters(req)
-        (marker, limit) = common.get_pagination_params(req)
-        images = self._image_service.detail(
-            context, filters=filters, marker=marker, limit=limit)
+        page_params = common.get_pagination_params(req)
+        images = self._image_service.detail(context, filters=filters,
+                                            **page_params)
         builder = self.get_builder(req).build
         return dict(images=[builder(image, detail=True) for image in images])

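Note: because get_pagination_params() only includes keys that were actually
supplied, the **page_params expansion passes exactly those keyword arguments:

    page_params = common.get_pagination_params(req)
    # {'marker': 40, 'limit': 20} makes the call equivalent to
    #     self._image_service.index(context, filters=filters,
    #                               marker=40, limit=20)
    # while {} passes no pagination kwargs at all, which is why the mocked
    # expectations in the image tests below drop marker=0/limit=0.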
@@ -76,10 +76,17 @@ class Controller(object):

         builder - the response model builder
         """
-        reservation_id = req.str_GET.get('reservation_id')
+        query_str = req.str_GET
+        reservation_id = query_str.get('reservation_id')
+        project_id = query_str.get('project_id')
+        fixed_ip = query_str.get('fixed_ip')
+        recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
         instance_list = self.compute_api.get_all(
             req.environ['nova.context'],
-            reservation_id=reservation_id)
+            reservation_id=reservation_id,
+            project_id=project_id,
+            fixed_ip=fixed_ip,
+            recurse_zones=recurse_zones)
         limited_list = self._limit_items(instance_list, req)
         builder = self._get_view_builder(req)
         servers = [builder.build(inst, is_detail)['server']
@@ -111,14 +118,15 @@ class Controller(object):
         extra_values = None
         result = None
         try:
-            extra_values, result = self.helper.create_instance(
+            extra_values, instances = self.helper.create_instance(
                 req, body, self.compute_api.create)
         except faults.Fault, f:
             return f

-        instances = result
-        (inst, ) = instances
+        # We can only return 1 instance via the API, if we happen to
+        # build more than one...  instances is a list, so we'll just
+        # use the first one..
+        inst = instances[0]
         for key in ['instance_type', 'image_ref']:
             inst[key] = extra_values[key]

@@ -143,7 +143,7 @@ class API(base.Base):

     def _check_create_parameters(self, context, instance_type,
                image_href, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
+               min_count=None, max_count=None,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
                availability_zone=None, user_data=None, metadata={},
@@ -154,6 +154,10 @@ class API(base.Base):

         if not instance_type:
             instance_type = instance_types.get_default_instance_type()
+        if not min_count:
+            min_count = 1
+        if not max_count:
+            max_count = min_count

         num_instances = quota.allowed_instances(context, max_count,
                                                 instance_type)
@@ -338,7 +342,7 @@ class API(base.Base):

     def create_all_at_once(self, context, instance_type,
                image_href, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
+               min_count=None, max_count=None,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
                availability_zone=None, user_data=None, metadata={},
@@ -368,7 +372,7 @@ class API(base.Base):

     def create(self, context, instance_type,
                image_href, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
+               min_count=None, max_count=None,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
                availability_zone=None, user_data=None, metadata={},
@@ -613,17 +617,53 @@ class API(base.Base):
         """
         return self.get(context, instance_id)

-    def get_all_across_zones(self, context, reservation_id):
-        """Get all instances with this reservation_id, across
-        all available Zones (if any).
+    def get_all(self, context, project_id=None, reservation_id=None,
+                fixed_ip=None, recurse_zones=False):
+        """Get all instances filtered by one of the given parameters.
+
+        If there is no filter and the context is an admin, it will retreive
+        all instances in the system.
         """
-        context = context.elevated()
-        instances = self.db.instance_get_all_by_reservation(
-                context, reservation_id)
+        if reservation_id is not None:
+            recurse_zones = True
+            instances = self.db.instance_get_all_by_reservation(
+                    context, reservation_id)
+        elif fixed_ip is not None:
+            try:
+                instances = self.db.fixed_ip_get_instance(context, fixed_ip)
+            except exception.FloatingIpNotFound, e:
+                if not recurse_zones:
+                    raise
+                instances = None
+        elif project_id or not context.is_admin:
+            if not context.project:
+                instances = self.db.instance_get_all_by_user(
+                        context, context.user_id)
+            else:
+                if project_id is None:
+                    project_id = context.project_id
+                instances = self.db.instance_get_all_by_project(
+                        context, project_id)
+        else:
+            instances = self.db.instance_get_all(context)
+
+        if instances is None:
+            instances = []
+        elif not isinstance(instances, list):
+            instances = [instances]
+
+        if not recurse_zones:
+            return instances

-        children = scheduler_api.call_zone_method(context, "list",
+        admin_context = context.elevated()
+        children = scheduler_api.call_zone_method(admin_context,
+                "list",
                 novaclient_collection_name="servers",
-                reservation_id=reservation_id)
+                reservation_id=reservation_id,
+                project_id=project_id,
+                fixed_ip=fixed_ip,
+                recurse_zones=True)

         for zone, servers in children:
             for server in servers:
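Note: filter precedence in the new get_all(): reservation_id wins and forces
recurse_zones=True, then fixed_ip, then project_id or a non-admin context, and
an unfiltered admin call returns everything. Roughly (values illustrative):

    # admin, no filters        -> self.db.instance_get_all(context)
    # reservation_id='r-abc'   -> by reservation, recursing into child zones
    # fixed_ip='10.0.0.5'      -> fixed_ip_get_instance(), with the single
    #                             result normalized to a one-element list
    # non-admin or project_id  -> by user, or by project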
@@ -632,32 +672,6 @@ class API(base.Base):
                     instances.append(server._info)
         return instances

-    def get_all(self, context, project_id=None, reservation_id=None,
-                fixed_ip=None):
-        """Get all instances filtered by one of the given parameters.
-
-        If there is no filter and the context is an admin, it will retreive
-        all instances in the system.
-        """
-        if reservation_id is not None:
-            return self.get_all_across_zones(context, reservation_id)
-
-        if fixed_ip is not None:
-            return self.db.fixed_ip_get_instance(context, fixed_ip)
-
-        if project_id or not context.is_admin:
-            if not context.project:
-                return self.db.instance_get_all_by_user(
-                    context, context.user_id)
-
-            if project_id is None:
-                project_id = context.project_id
-
-            return self.db.instance_get_all_by_project(
-                context, project_id)
-
-        return self.db.instance_get_all(context)
-
     def _cast_compute_message(self, method, context, instance_id, host=None,
                               params=None):
         """Generic handler for RPC casts to compute.
@@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer):
         unique = uuid.uuid4().hex
         self.queue = '%s_fanout_%s' % (topic, unique)
         self.durable = False
+        # Fanout creates unique queue names, so we should auto-remove
+        # them when done, so they're not left around on restart.
+        # Also, we're the only one that should be consuming.  exclusive
+        # implies auto_delete, so we'll just set that..
+        self.exclusive = True
         LOG.info(_('Created "%(exchange)s" fanout exchange '
                    'with "%(key)s" routing key'),
                  dict(exchange=self.exchange, key=self.routing_key))
@@ -162,32 +162,53 @@ def child_zone_helper(zone_list, func):
             _wrap_method(_process, func), zone_list)]


-def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+def _issue_novaclient_command(nova, zone, collection,
+        method_name, *args, **kwargs):
     """Use novaclient to issue command to a single child zone.
-    One of these will be run in parallel for each child zone."""
+    One of these will be run in parallel for each child zone.
+    """
     manager = getattr(nova, collection)
-    result = None
-    try:
+
+    # NOTE(comstud): This is not ideal, but we have to do this based on
+    # how novaclient is implemented right now.
+    # 'find' is special cased as novaclient requires kwargs for it to
+    # filter on a 'get_all'.
+    # Every other method first needs to do a 'get' on the first argument
+    # passed, which should be a UUID.  If it's 'get' itself that we want,
+    # we just return the result.  Otherwise, we next call the real method
+    # that's wanted... passing other arguments that may or may not exist.
+    if method_name in ['find', 'findall']:
         try:
-            result = manager.get(int(item_id))
-        except ValueError, e:
-            result = manager.find(name=item_id)
+            return getattr(manager, method_name)(**kwargs)
         except novaclient.NotFound:
             url = zone.api_url
-            LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
+            LOG.debug(_("%(collection)s.%(method_name)s didn't find "
+                    "anything matching '%(kwargs)s' on '%(url)s'" %
                     locals()))
             return None

-    if method_name.lower() not in ['get', 'find']:
-        result = getattr(result, method_name)()
+    args = list(args)
+    # pop off the UUID to look up
+    item = args.pop(0)
+    try:
+        result = manager.get(item)
+    except novaclient.NotFound:
+        url = zone.api_url
+        LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
+                locals()))
+        return None
+
+    if method_name.lower() != 'get':
+        # if we're doing something other than 'get', call it passing args.
+        result = getattr(result, method_name)(*args, **kwargs)
     return result


-def wrap_novaclient_function(f, collection, method_name, item_id):
-    """Appends collection, method_name and item_id to the incoming
+def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
+    """Appends collection, method_name and arguments to the incoming
     (nova, zone) call from child_zone_helper."""
     def inner(nova, zone):
-        return f(nova, zone, collection, method_name, item_id)
+        return f(nova, zone, collection, method_name, *args, **kwargs)

     return inner

@@ -220,7 +241,7 @@ class reroute_compute(object):
         the wrapped method. (This ensures that zone-local code can
         continue to use integer IDs).

-    4. If the item was not found, we delgate the call to a child zone
+    4. If the item was not found, we delegate the call to a child zone
        using the UUID.
     """
     def __init__(self, method_name):
@@ -329,8 +329,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
                     'instance_type': <InstanceType dict>}
     """

-    def filter_hosts(self, num, request_spec):
+    def filter_hosts(self, topic, request_spec, hosts=None):
         """Filter the full host list (from the ZoneManager)"""

         filter_name = request_spec.get('filter', None)
         host_filter = choose_host_filter(filter_name)

@@ -341,8 +342,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
         name, query = host_filter.instance_type_to_filter(instance_type)
         return host_filter.filter_hosts(self.zone_manager, query)

-    def weigh_hosts(self, num, request_spec, hosts):
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes must override this method and return
         a lists of hosts in [{weight, hostname}] format.
         """
-        return [dict(weight=1, hostname=host) for host, caps in hosts]
+        return [dict(weight=1, hostname=hostname, capabilities=caps)
+                for hostname, caps in hosts]
@@ -48,25 +48,43 @@ def noop_cost_fn(host):
     return 1


-flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
              'How much weight to give the fill-first cost function')


-def fill_first_cost_fn(host):
+def compute_fill_first_cost_fn(host):
     """Prefer hosts that have less ram available, filter_hosts will exclude
     hosts that don't have enough ram"""
     hostname, caps = host
-    free_mem = caps['compute']['host_memory_free']
+    free_mem = caps['host_memory_free']
     return free_mem


 class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    def get_cost_fns(self):
+    def __init__(self, *args, **kwargs):
+        self.cost_fns_cache = {}
+        super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+    def get_cost_fns(self, topic):
         """Returns a list of tuples containing weights and cost functions to
         use for weighing hosts
         """
+        if topic in self.cost_fns_cache:
+            return self.cost_fns_cache[topic]
+
         cost_fns = []
         for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+            if '.' in cost_fn_str:
+                short_name = cost_fn_str.split('.')[-1]
+            else:
+                short_name = cost_fn_str
+                cost_fn_str = "%s.%s.%s" % (
+                        __name__, self.__class__.__name__, short_name)
+
+            if not (short_name.startswith('%s_' % topic) or
+                    short_name.startswith('noop')):
+                continue
+
             try:
                 # NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):

             cost_fns.append((weight, cost_fn))

+        self.cost_fns_cache[topic] = cost_fns
         return cost_fns

-    def weigh_hosts(self, num, request_spec, hosts):
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Returns a list of dictionaries of form:
-           [ {weight: weight, hostname: hostname} ]"""
-
-        # FIXME(sirp): weigh_hosts should handle more than just instances
-        hostnames = [hostname for hostname, caps in hosts]
-
-        cost_fns = self.get_cost_fns()
+           [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+        """
+        cost_fns = self.get_cost_fns(topic)
         costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)

         weighted = []
         weight_log = []
-        for cost, hostname in zip(costs, hostnames):
+        for cost, (hostname, caps) in zip(costs, hosts):
             weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
-            weight_dict = dict(weight=cost, hostname=hostname)
+            weight_dict = dict(weight=cost, hostname=hostname,
+                    capabilities=caps)
             weighted.append(weight_dict)

         LOG.debug(_("Weighted Costs => %s") % weight_log)
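Note: weigh_hosts() now carries each host's capabilities alongside its weight
so that select() can later debit resources from the winning host. With the
FakeZoneManager fixtures used in the tests, the result looks like:

    [{'weight': 1.0, 'hostname': 'host1',
      'capabilities': {'host_memory_free': 1073741824}},
     ...]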
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     weighted_fns - list of weights and functions like:
         [(weight, objective-functions)]

-    Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+    Returns an unsorted list of scores. To pair with hosts do:
+        zip(scores, hosts)
     """
     # Table of form:
     # { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     domain_scores = []
     for idx in sorted(score_table):
         elem_score = sum(score_table[idx])
-        elem = domain[idx]
         domain_scores.append(elem_score)

     return domain_scores
@@ -180,18 +180,22 @@ class ZoneAwareScheduler(driver.Scheduler):
                     request_spec, kwargs)
             return None

+        num_instances = request_spec.get('num_instances', 1)
+        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+                locals())
+
         # Create build plan and provision ...
         build_plan = self.select(context, request_spec)
         if not build_plan:
             raise driver.NoValidHost(_('No hosts were available'))

-        for num in xrange(request_spec['num_instances']):
+        for num in xrange(num_instances):
             if not build_plan:
                 break

-            item = build_plan.pop(0)
-            self._provision_resource(context, item, instance_id, request_spec,
-                                     kwargs)
+            build_plan_item = build_plan.pop(0)
+            self._provision_resource(context, build_plan_item, instance_id,
+                                     request_spec, kwargs)

         # Returning None short-circuits the routing to Compute (since
         # we've already done it here)
@@ -224,18 +228,36 @@ class ZoneAwareScheduler(driver.Scheduler):
             raise NotImplemented(_("Zone Aware Scheduler only understands "
                                    "Compute nodes (for now)"))

-        #TODO(sandy): how to infer this from OS API params?
-        num_instances = 1
+        num_instances = request_spec.get('num_instances', 1)
+        instance_type = request_spec['instance_type']

-        # Filter local hosts based on requirements ...
-        host_list = self.filter_hosts(num_instances, request_spec)
-
-        # TODO(sirp): weigh_hosts should also be a function of 'topic' or
-        # resources, so that we can apply different objective functions to it
+        weighted = []
+        host_list = None
+
+        for i in xrange(num_instances):
+            # Filter local hosts based on requirements ...
+            #
+            # The first pass through here will pass 'None' as the
+            # host_list.. which tells the filter to build the full
+            # list of hosts.
+            # On a 2nd pass, the filter can modify the host_list with
+            # any updates it needs to make based on resources that
+            # may have been consumed from a previous build..
+            host_list = self.filter_hosts(topic, request_spec, host_list)
+            if not host_list:
+                LOG.warn(_("Filter returned no hosts after processing "
+                        "%(i)d of %(num_instances)d instances") % locals())
+                break

-        # then weigh the selected hosts.
-        # weighted = [{weight=weight, name=hostname}, ...]
-        weighted = self.weigh_hosts(num_instances, request_spec, host_list)
+            # then weigh the selected hosts.
+            # weighted = [{weight=weight, hostname=hostname,
+            #              capabilities=capabs}, ...]
+            weights = self.weigh_hosts(topic, request_spec, host_list)
+            weights.sort(key=operator.itemgetter('weight'))
+            best_weight = weights[0]
+            weighted.append(best_weight)
+            self.consume_resources(topic, best_weight['capabilities'],
+                                   instance_type)

         # Next, tack on the best weights from the child zones ...
         json_spec = json.dumps(request_spec)
@@ -254,18 +276,65 @@ class ZoneAwareScheduler(driver.Scheduler):
         weighted.sort(key=operator.itemgetter('weight'))
         return weighted

-    def filter_hosts(self, num, request_spec):
-        """Derived classes must override this method and return
-        a list of hosts in [(hostname, capability_dict)] format.
+    def compute_filter(self, hostname, capabilities, request_spec):
+        """Return whether or not we can schedule to this compute node.
+        Derived classes should override this and return True if the host
+        is acceptable for scheduling.
         """
-        # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
-        service_states = self.zone_manager.service_states
-        return [(host, services)
-                for host, services in service_states.iteritems()]
+        instance_type = request_spec['instance_type']
+        requested_mem = instance_type['memory_mb'] * 1024 * 1024
+        return capabilities['host_memory_free'] >= requested_mem

-    def weigh_hosts(self, num, request_spec, hosts):
+    def filter_hosts(self, topic, request_spec, host_list=None):
+        """Return a list of hosts which are acceptable for scheduling.
+        Return value should be a list of (hostname, capability_dict)s.
+        Derived classes may override this, but may find the
+        '<topic>_filter' function more appropriate.
+        """
+
+        def _default_filter(self, hostname, capabilities, request_spec):
+            """Default filter function if there's no <topic>_filter"""
+            # NOTE(sirp): The default logic is the equivalent to
+            # AllHostsFilter
+            return True
+
+        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
+
+        if host_list is None:
+            first_run = True
+            host_list = self.zone_manager.service_states.iteritems()
+        else:
+            first_run = False
+
+        filtered_hosts = []
+        for host, services in host_list:
+            if first_run:
+                if topic not in services:
+                    continue
+                services = services[topic]
+            if filter_func(host, services, request_spec):
+                filtered_hosts.append((host, services))
+        return filtered_hosts
+
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes may override this to provide more sophisticated
         scheduling objectives
         """
         # NOTE(sirp): The default logic is the same as the NoopCostFunction
-        return [dict(weight=1, hostname=host) for host, caps in hosts]
+        return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+                for hostname, capabilities in hosts]
+
+    def compute_consume(self, capabilities, instance_type):
+        """Consume compute resources for selected host"""
+
+        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+        capabilities['host_memory_free'] -= requested_mem
+
+    def consume_resources(self, topic, capabilities, instance_type):
+        """Consume resources for a specific host.  'host' is a tuple
+        of the hostname and the services"""
+
+        consume_func = getattr(self, '%s_consume' % topic, None)
+        if not consume_func:
+            return
+        consume_func(capabilities, instance_type)
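Note: compute_consume() is what makes the per-instance select() loop converge:
each chosen host's free memory is debited before the next filtering pass. The
arithmetic, using the LeastCostSchedulerTestCase fixture values:

    # instance_type = {'memory_mb': 1024}
    # requested_mem = 1024 * 1024 * 1024 = 1073741824 bytes
    # host1 (host_memory_free=1073741824) drops to 0 after one build and
    # then fails compute_filter's '>= requested_mem' check on the next pass.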
@@ -161,12 +161,12 @@ class PaginationParamsTest(test.TestCase):
     def test_no_params(self):
         """ Test no params. """
         req = Request.blank('/')
-        self.assertEqual(common.get_pagination_params(req), (0, 0))
+        self.assertEqual(common.get_pagination_params(req), {})

     def test_valid_marker(self):
         """ Test valid marker param. """
         req = Request.blank('/?marker=1')
-        self.assertEqual(common.get_pagination_params(req), (1, 0))
+        self.assertEqual(common.get_pagination_params(req), {'marker': 1})

     def test_invalid_marker(self):
         """ Test invalid marker param. """
@@ -177,10 +177,16 @@ class PaginationParamsTest(test.TestCase):
     def test_valid_limit(self):
         """ Test valid limit param. """
         req = Request.blank('/?limit=10')
-        self.assertEqual(common.get_pagination_params(req), (0, 10))
+        self.assertEqual(common.get_pagination_params(req), {'limit': 10})

     def test_invalid_limit(self):
         """ Test invalid limit param. """
         req = Request.blank('/?limit=-2')
         self.assertRaises(
             webob.exc.HTTPBadRequest, common.get_pagination_params, req)

+    def test_valid_limit_and_marker(self):
+        """ Test valid limit and marker parameters. """
+        req = Request.blank('/?limit=20&marker=40')
+        self.assertEqual(common.get_pagination_params(req),
+                         {'marker': 40, 'limit': 20})
@@ -803,7 +803,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'name': 'testname'}
         image_service.index(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images?name=testname')
@@ -818,7 +818,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'status': 'ACTIVE'}
         image_service.index(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images?status=ACTIVE')
@@ -833,7 +833,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'property-test': '3'}
         image_service.index(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images?property-test=3')
@@ -848,7 +848,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'status': 'ACTIVE'}
         image_service.index(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -863,7 +863,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {}
         image_service.index(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images')
@@ -878,7 +878,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'name': 'testname'}
         image_service.detail(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images/detail?name=testname')
@@ -893,7 +893,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'status': 'ACTIVE'}
         image_service.detail(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images/detail?status=ACTIVE')
@@ -908,7 +908,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'property-test': '3'}
         image_service.detail(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images/detail?property-test=3')
@@ -923,7 +923,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {'status': 'ACTIVE'}
         image_service.detail(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -938,7 +938,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
         context = object()
         filters = {}
         image_service.detail(
-            context, filters=filters, marker=0, limit=0).AndReturn([])
+            context, filters=filters).AndReturn([])
         mocker.ReplayAll()
         request = webob.Request.blank(
             '/v1.1/images/detail')
@@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase):
                     for hostname, caps in hosts]
         self.assertWeights(expected, num, request_spec, hosts)

-    def test_fill_first_cost_fn(self):
+    def test_compute_fill_first_cost_fn(self):
         FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.fill_first_cost_fn',
+            'nova.scheduler.least_cost.compute_fill_first_cost_fn',
         ]
-        FLAGS.fill_first_cost_fn_weight = 1
+        FLAGS.compute_fill_first_cost_fn_weight = 1

         num = 1
-        request_spec = {}
-        hosts = self.sched.filter_hosts(num, request_spec)
+        instance_type = {'memory_mb': 1024}
+        request_spec = {'instance_type': instance_type}
+        hosts = self.sched.filter_hosts('compute', request_spec, None)

         expected = []
         for idx, (hostname, caps) in enumerate(hosts):
@@ -1074,7 +1074,7 @@ class DynamicNovaClientTest(test.TestCase):

         self.assertEquals(api._issue_novaclient_command(
             FakeNovaClient(FakeServerCollection()),
-            zone, "servers", "find", "name").b, 22)
+            zone, "servers", "find", name="test").b, 22)

         self.assertEquals(api._issue_novaclient_command(
             FakeNovaClient(FakeServerCollection()),
@@ -1088,7 +1088,7 @@ class DynamicNovaClientTest(test.TestCase):

         self.assertEquals(api._issue_novaclient_command(
             FakeNovaClient(FakeEmptyServerCollection()),
-            zone, "servers", "find", "name"), None)
+            zone, "servers", "find", name="test"), None)

         self.assertEquals(api._issue_novaclient_command(
             FakeNovaClient(FakeEmptyServerCollection()),
@@ -55,29 +55,21 @@ def fake_zone_manager_service_states(num_hosts):


 class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    def filter_hosts(self, num, specs):
-        # NOTE(sirp): this is returning [(hostname, services)]
-        return self.zone_manager.service_states.items()
-
-    def weigh_hosts(self, num, specs, hosts):
-        fake_weight = 99
-        weighted = []
-        for hostname, caps in hosts:
-            weighted.append(dict(weight=fake_weight, name=hostname))
-        return weighted
+    # No need to stub anything at the moment
+    pass


 class FakeZoneManager(zone_manager.ZoneManager):
     def __init__(self):
         self.service_states = {
             'host1': {
-                'compute': {'ram': 1000},
+                'compute': {'host_memory_free': 1073741824},
             },
             'host2': {
-                'compute': {'ram': 2000},
+                'compute': {'host_memory_free': 2147483648},
             },
             'host3': {
-                'compute': {'ram': 3000},
+                'compute': {'host_memory_free': 3221225472},
             },
         }

@@ -154,8 +146,8 @@ class ZoneAwareSchedulerTestCase(test.TestCase):

     def test_zone_aware_scheduler(self):
         """
-        Create a nested set of FakeZones, ensure that a select call returns the
-        appropriate build plan.
+        Create a nested set of FakeZones, try to build multiple instances
+        and ensure that a select call returns the appropriate build plan.
         """
         sched = FakeZoneAwareScheduler()
         self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
@@ -164,13 +156,17 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         sched.set_zone_manager(zm)

         fake_context = {}
-        build_plan = sched.select(fake_context, {})
+        build_plan = sched.select(fake_context,
+                {'instance_type': {'memory_mb': 512},
+                 'num_instances': 4})
+
+        # 4 from local zones, 12 from remotes
+        self.assertEqual(16, len(build_plan))

-        self.assertEqual(15, len(build_plan))
-
-        hostnames = [plan_item['name']
-                     for plan_item in build_plan if 'name' in plan_item]
-        self.assertEqual(3, len(hostnames))
+        hostnames = [plan_item['hostname']
+                     for plan_item in build_plan if 'hostname' in plan_item]
+        # 4 local hosts
+        self.assertEqual(4, len(hostnames))

     def test_empty_zone_aware_scheduler(self):
         """
@@ -185,8 +181,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
         fake_context = {}
         self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
                           fake_context, 1,
-                          dict(host_filter=None,
-                               request_spec={'instance_type': {}}))
+                          dict(host_filter=None, instance_type={}))

     def test_schedule_do_not_schedule_with_hint(self):
         """
@@ -276,6 +276,19 @@ class GenericUtilsTestCase(test.TestCase):
         result = utils.parse_server_string('www.exa:mple.com:8443')
         self.assertEqual(('', ''), result)

+    def test_bool_from_str(self):
+        self.assertTrue(utils.bool_from_str('1'))
+        self.assertTrue(utils.bool_from_str('2'))
+        self.assertTrue(utils.bool_from_str('-1'))
+        self.assertTrue(utils.bool_from_str('true'))
+        self.assertTrue(utils.bool_from_str('True'))
+        self.assertTrue(utils.bool_from_str('tRuE'))
+        self.assertFalse(utils.bool_from_str('False'))
+        self.assertFalse(utils.bool_from_str('false'))
+        self.assertFalse(utils.bool_from_str('0'))
+        self.assertFalse(utils.bool_from_str(None))
+        self.assertFalse(utils.bool_from_str('junk'))
+

 class IsUUIDLikeTestCase(test.TestCase):
     def assertUUIDLike(self, val, expected):
@@ -772,6 +772,17 @@ def is_uuid_like(val):
     return (len(val) == 36) and (val.count('-') == 4)


+def bool_from_str(val):
+    """Convert a string representation of a bool into a bool value"""
+    if not val:
+        return False
+    try:
+        return True if int(val) else False
+    except ValueError:
+        return val.lower() == 'true'
+
+
 class Bootstrapper(object):
     """Provides environment bootstrapping capabilities for entry points."""

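Note: per the tests above, bool_from_str() treats any string that parses as a
nonzero integer as True, any case variant of 'true' as True, and everything
else (None, '0', unparseable junk) as False:

    >>> bool_from_str('2'), bool_from_str('tRuE'), bool_from_str('junk')
    (True, True, False)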
@@ -9,7 +9,7 @@ boto==1.9b
 carrot==0.10.5
 eventlet
 lockfile==0.8
-python-novaclient==2.5.3
+python-novaclient==2.5.7
 python-daemon==1.5.5
 python-gflags==1.3
 redis==2.0.0