
This patch makes RPC calls that result from booting an instance propagate the new block device mapping format. This is done by adding a 'legacy_bdm' flag to the conductor (task) API. Since both the scheduler and the compute service propagate the block device mapping as part of the request_spec field, a flag named legacy_bdm_in_spec was introduced in their respective APIs to indicate this. Currently block_device_mapping is not used by any of the in-tree filters in the scheduler, so the scheduler's boot functionality is assumed to have transitioned to the new format. This patch also bumps the RPC versions of the task, scheduler and compute manager classes. Finally, this patch propagates legacy_bdm_in_spec through the run_instance callback of the compute manager so that, in case of error, the instance is re-scheduled with the proper flag set. blueprint: improve-block-device-handling Change-Id: I5f25ddd4d586dda91061f065c1796be726b0ede3
120 lines
4.9 KiB
Python
120 lines
4.9 KiB
Python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
|
|
|
# Copyright (c) 2010 OpenStack Foundation
|
|
# Copyright 2010 United States Government as represented by the
|
|
# Administrator of the National Aeronautics and Space Administration.
|
|
# All Rights Reserved.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
"""
|
|
Chance (Random) Scheduler implementation
|
|
"""
|
|
|
|
import random
|
|
|
|
from oslo.config import cfg
|
|
|
|
from nova.compute import rpcapi as compute_rpcapi
|
|
from nova import exception
|
|
from nova.openstack.common.gettextutils import _
|
|
from nova.scheduler import driver
|
|
|
|
CONF = cfg.CONF
|
|
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
|
|
|
|
|
|
class ChanceScheduler(driver.Scheduler):
    """Scheduler driver that selects a compute host at random."""

    def __init__(self, *args, **kwargs):
        super(ChanceScheduler, self).__init__(*args, **kwargs)
        # RPC client used to cast run_instance to the chosen compute host.
        self.compute_rpcapi = compute_rpcapi.ComputeAPI()

    def _filter_hosts(self, request_spec, hosts, filter_properties):
        """Filter a list of hosts based on request_spec."""
        excluded = filter_properties.get('ignore_hosts', [])
        return [host for host in hosts if host not in excluded]

    def _schedule(self, context, topic, request_spec, filter_properties):
        """Picks a host that is up at random."""
        elevated = context.elevated()
        candidates = self.hosts_up(elevated, topic)
        if not candidates:
            msg = _("Is the appropriate service running?")
            raise exception.NoValidHost(reason=msg)

        candidates = self._filter_hosts(request_spec, candidates,
                                        filter_properties)
        if not candidates:
            msg = _("Could not find another compute")
            raise exception.NoValidHost(reason=msg)

        return random.choice(candidates)

    def select_hosts(self, context, request_spec, filter_properties):
        """Selects a set of random hosts."""
        # One random pick per requested instance uuid.
        hosts = []
        for _uuid in request_spec.get('instance_uuids', []):
            hosts.append(self._schedule(context, CONF.compute_topic,
                                        request_spec, filter_properties))
        if not hosts:
            raise exception.NoValidHost(reason="")
        return hosts

    def select_destinations(self, context, request_spec, filter_properties):
        """Selects random destinations."""
        num_instances = request_spec['num_instances']
        # NOTE(timello): return dicts keyed by 'host', 'nodename' and
        # 'limits' for compatibility with filter_scheduler.
        dests = []
        for _ in range(num_instances):
            chosen = self._schedule(context, CONF.compute_topic,
                                    request_spec, filter_properties)
            dests.append(dict(host=chosen, nodename=None, limits=None))

        if len(dests) < num_instances:
            raise exception.NoValidHost(reason='')
        return dests

    def schedule_run_instance(self, context, request_spec,
                              admin_password, injected_files,
                              requested_networks, is_first_time,
                              filter_properties, legacy_bdm_in_spec):
        """Create and run an instance or instances."""
        instance_uuids = request_spec.get('instance_uuids')
        for launch_index, instance_uuid in enumerate(instance_uuids):
            request_spec['instance_properties']['launch_index'] = launch_index
            try:
                host = self._schedule(context, CONF.compute_topic,
                                      request_spec, filter_properties)
                updated_instance = driver.instance_update_db(context,
                                                             instance_uuid)
                self.compute_rpcapi.run_instance(context,
                        instance=updated_instance, host=host,
                        requested_networks=requested_networks,
                        injected_files=injected_files,
                        admin_password=admin_password,
                        is_first_time=is_first_time,
                        request_spec=request_spec,
                        filter_properties=filter_properties,
                        legacy_bdm_in_spec=legacy_bdm_in_spec)
            except Exception as ex:
                # NOTE(vish): we don't reraise the exception here so that
                #             every instance in the request gets set to
                #             error properly
                driver.handle_schedule_error(context, ex, instance_uuid,
                                             request_spec)