
Fixes bug 844160

Makes the servers create API call work with all schedulers, removes
'zone boot', and folds create_instance_helper back into the servers
controller.

Notable changes:

1) compute API's create_all_at_once has been removed. It was only used
   by zone boot.

2) compute API's create() no longer creates Instance DB entries; the
   schedulers now do this. This makes sense, as only the schedulers
   know where the instances will be placed -- locally or in a child
   zone. It comes at a cost, though: compute_api.create() now does a
   'call' to the scheduler instead of a 'cast' in most cases (* see
   below), so that it can get back the IDs of the instances the
   scheduler created. Ultimately, we probably need a way to generate
   UUIDs before scheduling and return only the information we know
   about an instance before it is actually scheduled and created; we
   could then revert this to a cast. (Or maybe we should always return
   a reservation ID instead of an instance.)

3) scheduler* calls no longer return a host. They return a value that
   is passed back to the caller if the caller used rpc.call(). The
   casts to hosts are now done by the scheduler drivers themselves.

4) There has been an undocumented feature in the OS API that allows
   multiple instances to be built. I've kept it.

5) If compute_api.create() is creating multiple instances, only a
   single call is made to the scheduler, versus the old way of sending
   many casts. All schedulers now check how many instances have been
   requested.

6) I've added an undocumented option, 'return_reservation_id', to the
   build request. If set to True, only a reservation ID is returned to
   the API caller, not the instance. This essentially restores the old
   'nova zone-boot' functionality.

7) It was requested that I create a stub for a zones extension, so
   you'll see the empty extension in here. We'll move some code to it
   later.

8) Fixes an unrelated bug that recently merged into trunk, where zones
   DB calls were no longer always done with admin context.

9) Scheduler calls were always being done with admin context; they now
   elevate only when needed.

10) Moved the stub_network flag so individual tests can run again.

* Case #6 above doesn't wait for the scheduler response with instance
  IDs; it does a 'cast' instead.

Change-Id: Ic040780a2e86d7330e225f14056dadbaa9fb3c7e
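
For illustration, here is a minimal sketch of the call-vs-cast hand-off
described in points 2, 5 and 6. This is not code from this change: the
helper name '_schedule_run_instance', the message layout, and the
presence of 'reservation_id' in the request_spec are assumptions; only
rpc.call()/rpc.cast() and the scheduler/compute topic flags are
existing nova pieces.

    from nova import flags
    from nova import rpc

    FLAGS = flags.FLAGS

    def _schedule_run_instance(context, request_spec,
                               return_reservation_id=False):
        """Hypothetical compute-API helper (sketch only).

        The scheduler now creates the instance DB entries, so we must
        rpc.call() and block until it returns the instances -- unless
        the caller only wants a reservation ID, in which case we can
        rpc.cast() and return immediately (the old zone-boot behavior;
        case #6 above).
        """
        msg = {'method': 'run_instance',
               'args': {'topic': FLAGS.compute_topic,
                        'request_spec': request_spec}}
        if return_reservation_id:
            # Fire and forget; the API caller gets only the
            # reservation ID, never the instance.
            rpc.cast(context, FLAGS.scheduler_topic, msg)
            return request_spec['reservation_id']
        # Block for the scheduler's response: the created instance(s).
        return rpc.call(context, FLAGS.scheduler_topic, msg)

schedule_run_instance() in the scheduler file below is the receiving
end of that message.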

130 lines · 6.0 KiB · Python

# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright (c) 2010 Openstack, LLC.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Simple Scheduler
"""

from nova import db
from nova import flags
from nova import utils
from nova.scheduler import driver
from nova.scheduler import chance

FLAGS = flags.FLAGS
flags.DEFINE_integer("max_cores", 16,
                     "maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
                     "maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
                     "maximum number of networks to allow per host")


class SimpleScheduler(chance.ChanceScheduler):
    """Implements Naive Scheduler that tries to find least loaded host."""

    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""

        availability_zone = instance_opts.get('availability_zone')

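        # Admins may force a specific host by passing an availability
        # zone of the form 'zone:host'.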
        if availability_zone and context.is_admin and \
                (':' in availability_zone):
            zone, host = availability_zone.split(':', 1)
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-compute')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s is not alive") % host)
            return host
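
        # Hosts come back sorted by instance-core usage, so the first
        # one that is up and has room is the least loaded.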
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_cores) = result
            if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost(_("All hosts have too many cores"))
            if self.service_is_up(service):
                return service['host']
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))

    def schedule_run_instance(self, context, request_spec, *_args,
                              **_kwargs):
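        """Builds the requested number of instances.

        For each instance: picks the least loaded host, creates the
        instance DB entry (the scheduler owns this now), and casts
        'run_instance' to the chosen host.  Returns the encoded
        instances so they can travel back over rpc.call().
        """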
        num_instances = request_spec.get('num_instances', 1)
        instances = []
        for num in xrange(num_instances):
            host = self._schedule_instance(context,
                    request_spec['instance_properties'], *_args, **_kwargs)
            instance_ref = self.create_instance_db_entry(context,
                    request_spec)
            driver.cast_to_compute_host(context, host, 'run_instance',
                    instance_id=instance_ref['id'], **_kwargs)
            instances.append(driver.encode_instance(instance_ref))
        return instances

    def schedule_start_instance(self, context, instance_id, *_args,
                                **_kwargs):
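        """Picks a host for an existing instance and casts
        'start_instance' to it.
        """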
        instance_ref = db.instance_get(context, instance_id)
        host = self._schedule_instance(context, instance_ref,
                *_args, **_kwargs)
        driver.cast_to_compute_host(context, host, 'start_instance',
                instance_id=instance_id, **_kwargs)

    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""
        volume_ref = db.volume_get(context, volume_id)
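        # As with instances, admins can pin the volume to a specific
        # host with a 'zone:host' availability zone.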
        if (volume_ref['availability_zone']
            and ':' in volume_ref['availability_zone']
            and context.is_admin):
            zone, _x, host = volume_ref['availability_zone'].partition(':')
            service = db.service_get_by_args(context.elevated(), host,
                                             'nova-volume')
            if not self.service_is_up(service):
                raise driver.WillNotSchedule(_("Host %s not available") % host)
            driver.cast_to_volume_host(context, host, 'create_volume',
                    volume_id=volume_id, **_kwargs)
            return None

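        # Hosts come back sorted by allocated gigabytes, so the first
        # live host with room is the least loaded.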
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost(_("All hosts have too many "
                                           "gigabytes"))
            if self.service_is_up(service):
                driver.cast_to_volume_host(context, service['host'],
                        'create_volume', volume_id=volume_id, **_kwargs)
                return None
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))

    def schedule_set_network_host(self, context, *_args, **_kwargs):
        """Picks a host that is up and has the fewest networks."""

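        # Hosts come back sorted by network count, least loaded first.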
        results = db.service_get_all_network_sorted(context)
        for result in results:
            (service, instance_count) = result
            if instance_count >= FLAGS.max_networks:
                raise driver.NoValidHost(_("All hosts have too many networks"))
            if self.service_is_up(service):
                driver.cast_to_network_host(context, service['host'],
                        'set_network_host', **_kwargs)
                return None
        raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                                   " for this request. Is the appropriate"
                                   " service running?"))