compute_api create*() and schedulers refactoring

Fixes bug 844160

Makes the servers create API call work with all schedulers, removes
'zone boot', and folds create_instance_helper back into the servers
controller.

Notable changes:
1) compute API's create_all_at_once() has been removed. It was only
used by zone boot.
2) compute API's create() no longer creates Instance DB entries. The
schedulers now do this. This makes sense, as only the schedulers will
know where the instances will be placed. They could be placed locally or
in a child zone. However, this comes at a cost. compute_api.create() now
does a 'call' to the scheduler instead of a 'cast' in most cases (* see
below). This is so it can receive the instance ID(s) that were created
back from the scheduler. Ultimately, we probably need to figure out a
way to generate UUIDs before scheduling and return only the information
we know about an instance before it is actually scheduled and created.
We could then revert this back to a cast. (Or maybe we always return a
reservation ID instead of an instance.)
3) The scheduler's schedule_* calls no longer return a host. They
return a value that is passed back to the caller if it does an
rpc.call(). The casts to hosts are now done by the scheduler drivers
themselves.
4) There's been an undocumented feature in the OS API to allow multiple
instances to be built. I've kept it.
5) If compute_api.create() is creating multiple instances, only a single
call is made to the scheduler, vs the old way of sending many casts. All
schedulers now check how many instances have been requested.
6) I've added an undocumented option 'return_reservation_id' when
building. If set to True, only a reservation ID is returned to the API
caller, not the instance. This essentially gives you the old 'nova
zone-boot' functionality.
7) It was requested I create a stub for a zones extension, so you'll see
the empty extension in here. We'll move some code to it later.
8) Fixes an unrelated bug that recently merged into trunk, where zone
DB calls were no longer always being made with admin context.
9) Scheduler calls were always being made with admin context when they
should instead elevate only where needed.
10) Moved the stub_network flag so individual tests can run again.

* Case 6 above doesn't wait for the scheduler's response with instance
IDs; it does a 'cast' instead. The sketch below illustrates the flow.
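
To make the call-versus-cast behavior in items 2, 3, 5 and 6 concrete,
here is a minimal sketch of how compute_api.create() can now hand a
build off to the scheduler. This is an illustration, not the actual
Nova code; the use of utils.generate_uid() for the reservation ID and
the exact message layout are assumptions:

    from nova import flags
    from nova import rpc
    from nova import utils

    FLAGS = flags.FLAGS


    def _schedule_create(context, request_spec, wait_for_instances=True):
        # Generate the reservation ID up front so it can be handed back
        # even when we don't wait for the scheduler's answer.
        reservation_id = utils.generate_uid('r')  # assumed helper
        request_spec['reservation_id'] = reservation_id
        msg = {'method': 'run_instance',
               'args': {'request_spec': request_spec}}
        if wait_for_instances:
            # Blocking 'call': the scheduler driver creates the DB
            # entries, casts 'run_instance' to the chosen compute
            # hosts, and returns the list of encoded instances.
            instances = rpc.call(context, FLAGS.scheduler_topic, msg)
            return (instances, reservation_id)
        # return_reservation_id case (item 6): fire-and-forget 'cast';
        # the caller only gets the reservation ID back.
        rpc.cast(context, FLAGS.scheduler_topic, msg)
        return (None, reservation_id)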

Change-Id: Ic040780a2e86d7330e225f14056dadbaa9fb3c7e
Author: Chris Behrens
Date: 2011-09-24 23:49:36 +00:00
Commit: b467e7095c (parent: 0f0f02e137)
12 changed files with 319 additions and 169 deletions


@@ -438,3 +438,6 @@ DEFINE_list('monkey_patch_modules',
 DEFINE_bool('allow_resize_to_same_host', False,
             'Allow destination machine to match source for resize. Useful'
             ' when testing in environments with only one host machine.')
+DEFINE_string('stub_network', False,
+              'Stub network related code')


@@ -60,24 +60,10 @@ class AbstractScheduler(driver.Scheduler):
             request_spec, kwargs):
         """Create the requested resource in this Zone."""
         host = build_plan_item['hostname']
-        base_options = request_spec['instance_properties']
-        image = request_spec['image']
-        instance_type = request_spec.get('instance_type')
-
-        # TODO(sandy): I guess someone needs to add block_device_mapping
-        # support at some point? Also, OS API has no concept of security
-        # groups.
-        instance = compute_api.API().create_db_entry_for_new_instance(context,
-                instance_type, image, base_options, None, [])
-
-        instance_id = instance['id']
-        kwargs['instance_id'] = instance_id
-
-        queue = db.queue_get_for(context, "compute", host)
-        params = {"method": "run_instance", "args": kwargs}
-        rpc.cast(context, queue, params)
-        LOG.debug(_("Provisioning locally via compute node %(host)s")
-                % locals())
+        instance = self.create_instance_db_entry(context, request_spec)
+        driver.cast_to_compute_host(context, host,
+                'run_instance', instance_id=instance['id'], **kwargs)
+        return driver.encode_instance(instance, local=True)

     def _decrypt_blob(self, blob):
         """Returns the decrypted blob or None if invalid. Broken out
@@ -112,7 +98,7 @@ class AbstractScheduler(driver.Scheduler):
         files = kwargs['injected_files']
         child_zone = zone_info['child_zone']
         child_blob = zone_info['child_blob']
-        zone = db.zone_get(context, child_zone)
+        zone = db.zone_get(context.elevated(), child_zone)
         url = zone.api_url
         LOG.debug(_("Forwarding instance create call to child zone %(url)s"
                 ". ReservationID=%(reservation_id)s") % locals())
@@ -132,12 +118,13 @@ class AbstractScheduler(driver.Scheduler):
         # arguments are passed as keyword arguments
         # (there's a reasonable default for ipgroups in the
         # novaclient call).
-        nova.servers.create(name, image_ref, flavor_id,
+        instance = nova.servers.create(name, image_ref, flavor_id,
                 meta=meta, files=files, zone_blob=child_blob,
                 reservation_id=reservation_id)
+        return driver.encode_instance(instance._info, local=False)

     def _provision_resource_from_blob(self, context, build_plan_item,
-            instance_id, request_spec, kwargs):
+            request_spec, kwargs):
         """Create the requested resource locally or in a child zone
         based on what is stored in the zone blob info.
@@ -165,21 +152,21 @@ class AbstractScheduler(driver.Scheduler):
         # Valid data ... is it for us?
         if 'child_zone' in host_info and 'child_blob' in host_info:
-            self._ask_child_zone_to_create_instance(context, host_info,
-                    request_spec, kwargs)
+            instance = self._ask_child_zone_to_create_instance(context,
+                    host_info, request_spec, kwargs)
         else:
-            self._provision_resource_locally(context, host_info, request_spec,
-                    kwargs)
+            instance = self._provision_resource_locally(context,
+                    host_info, request_spec, kwargs)
+        return instance

-    def _provision_resource(self, context, build_plan_item, instance_id,
+    def _provision_resource(self, context, build_plan_item,
             request_spec, kwargs):
         """Create the requested resource in this Zone or a child zone."""
         if "hostname" in build_plan_item:
-            self._provision_resource_locally(context, build_plan_item,
-                    request_spec, kwargs)
-            return
-        self._provision_resource_from_blob(context, build_plan_item,
-                instance_id, request_spec, kwargs)
+            return self._provision_resource_locally(context,
+                    build_plan_item, request_spec, kwargs)
+        return self._provision_resource_from_blob(context,
+                build_plan_item, request_spec, kwargs)

     def _adjust_child_weights(self, child_results, zones):
         """Apply the Scale and Offset values from the Zone definition
@@ -205,8 +192,7 @@ class AbstractScheduler(driver.Scheduler):
                 LOG.exception(_("Bad child zone scaling values "
                         "for Zone: %(zone_id)s") % locals())

-    def schedule_run_instance(self, context, instance_id, request_spec,
-            *args, **kwargs):
+    def schedule_run_instance(self, context, request_spec, *args, **kwargs):
         """This method is called from nova.compute.api to provision
         an instance. However we need to look at the parameters being
         passed in to see if this is a request to:
@@ -214,13 +200,16 @@ class AbstractScheduler(driver.Scheduler):
         2. Use the Build Plan information in the request parameters
            to simply create the instance (either in this zone or
            a child zone).
+
+        returns list of instances created.
         """
         # TODO(sandy): We'll have to look for richer specs at some point.
         blob = request_spec.get('blob')
         if blob:
-            self._provision_resource(context, request_spec, instance_id,
-                    request_spec, kwargs)
-            return None
+            instance = self._provision_resource(context,
+                    request_spec, request_spec, kwargs)
+            # Caller expects a list of instances
+            return [instance]

         num_instances = request_spec.get('num_instances', 1)
         LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
@@ -231,16 +220,16 @@ class AbstractScheduler(driver.Scheduler):
         if not build_plan:
             raise driver.NoValidHost(_('No hosts were available'))

+        instances = []
         for num in xrange(num_instances):
             if not build_plan:
                 break

             build_plan_item = build_plan.pop(0)
-            self._provision_resource(context, build_plan_item, instance_id,
-                    request_spec, kwargs)
+            instance = self._provision_resource(context,
+                    build_plan_item, request_spec, kwargs)
+            instances.append(instance)

-        # Returning None short-circuits the routing to Compute (since
-        # we've already done it here)
-        return None
+        return instances

     def select(self, context, request_spec, *args, **kwargs):
         """Select returns a list of weights and zone/host information
@@ -251,7 +240,7 @@ class AbstractScheduler(driver.Scheduler):
         return self._schedule(context, "compute", request_spec,
                 *args, **kwargs)

-    def schedule(self, context, topic, request_spec, *args, **kwargs):
+    def schedule(self, context, topic, method, *args, **kwargs):
         """The schedule() contract requires we return the one
         best-suited host for this request.
         """
@@ -285,7 +274,7 @@ class AbstractScheduler(driver.Scheduler):
         weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts)
         # Next, tack on the host weights from the child zones
         json_spec = json.dumps(request_spec)
-        all_zones = db.zone_get_all(context)
+        all_zones = db.zone_get_all(context.elevated())
         child_results = self._call_zone_method(context, "select",
                 specs=json_spec, zones=all_zones)
         self._adjust_child_weights(child_results, all_zones)


@@ -65,7 +65,7 @@ def get_zone_list(context):
     for item in items:
         item['api_url'] = item['api_url'].replace('\\/', '/')
     if not items:
-        items = db.zone_get_all(context)
+        items = db.zone_get_all(context.elevated())
     return items
@@ -116,7 +116,7 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
     pool = greenpool.GreenPool()
     results = []
     if zones is None:
-        zones = db.zone_get_all(context)
+        zones = db.zone_get_all(context.elevated())
     for zone in zones:
         try:
             # Do this on behalf of the user ...


@@ -160,8 +160,7 @@ class LeastCostScheduler(base_scheduler.BaseScheduler):
         weighted = []
         weight_log = []
-        for cost, (hostname, service) in zip(costs, hosts):
-            caps = service[topic]
+        for cost, (hostname, caps) in zip(costs, hosts):
             weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
             weight_dict = dict(weight=cost, hostname=hostname,
                     capabilities=caps)


@@ -38,7 +38,8 @@ flags.DEFINE_string('volume_scheduler_driver',
 # A mapping of methods to topics so we can figure out which driver to use.
 _METHOD_MAP = {'run_instance': 'compute',
                'start_instance': 'compute',
-               'create_volume': 'volume'}
+               'create_volume': 'volume',
+               'create_volumes': 'volume'}

 class MultiScheduler(driver.Scheduler):
@@ -69,5 +70,6 @@ class MultiScheduler(driver.Scheduler):
         for k, v in self.drivers.iteritems():
             v.set_zone_manager(zone_manager)

-    def schedule(self, context, topic, *_args, **_kwargs):
-        return self.drivers[topic].schedule(context, topic, *_args, **_kwargs)
+    def schedule(self, context, topic, method, *_args, **_kwargs):
+        return self.drivers[topic].schedule(context, topic,
+                method, *_args, **_kwargs)


@@ -39,47 +39,50 @@ flags.DEFINE_integer("max_networks", 1000,
 class SimpleScheduler(chance.ChanceScheduler):
     """Implements Naive Scheduler that tries to find least loaded host."""

-    def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
+    def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
         """Picks a host that is up and has the fewest running instances."""
-        instance_ref = db.instance_get(context, instance_id)
-        if (instance_ref['availability_zone']
-            and ':' in instance_ref['availability_zone']
-            and context.is_admin):
-            zone, _x, host = instance_ref['availability_zone'].partition(':')
+
+        availability_zone = instance_opts.get('availability_zone')
+
+        if availability_zone and context.is_admin and \
+                (':' in availability_zone):
+            zone, host = availability_zone.split(':', 1)
             service = db.service_get_by_args(context.elevated(), host,
                     'nova-compute')
             if not self.service_is_up(service):
                 raise driver.WillNotSchedule(_("Host %s is not alive") % host)
-
-            # TODO(vish): this probably belongs in the manager, if we
-            #             can generalize this somehow
-            now = utils.utcnow()
-            db.instance_update(context, instance_id, {'host': host,
-                    'scheduled_at': now})
             return host
+
         results = db.service_get_all_compute_sorted(context)
         for result in results:
             (service, instance_cores) = result
-            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
+            if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
                 raise driver.NoValidHost(_("All hosts have too many cores"))
             if self.service_is_up(service):
-                # NOTE(vish): this probably belongs in the manager, if we
-                #             can generalize this somehow
-                now = utils.utcnow()
-                db.instance_update(context,
-                        instance_id,
-                        {'host': service['host'],
-                         'scheduled_at': now})
                 return service['host']
         raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                 " for this request. Is the appropriate"
                 " service running?"))

-    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
-        return self._schedule_instance(context, instance_id, *_args, **_kwargs)
+    def schedule_run_instance(self, context, request_spec, *_args, **_kwargs):
+        num_instances = request_spec.get('num_instances', 1)
+        instances = []
+        for num in xrange(num_instances):
+            host = self._schedule_instance(context,
+                    request_spec['instance_properties'], *_args, **_kwargs)
+            instance_ref = self.create_instance_db_entry(context,
+                    request_spec)
+            driver.cast_to_compute_host(context, host, 'run_instance',
+                    instance_id=instance_ref['id'], **_kwargs)
+            instances.append(driver.encode_instance(instance_ref))
+        return instances

     def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
-        return self._schedule_instance(context, instance_id, *_args, **_kwargs)
+        instance_ref = db.instance_get(context, instance_id)
+        host = self._schedule_instance(context, instance_ref,
+                *_args, **_kwargs)
+        driver.cast_to_compute_host(context, host, 'start_instance',
+                instance_id=instance_id, **_kwargs)

     def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
         """Picks a host that is up and has the fewest volumes."""
@@ -92,13 +95,9 @@ class SimpleScheduler(chance.ChanceScheduler):
                     'nova-volume')
             if not self.service_is_up(service):
                 raise driver.WillNotSchedule(_("Host %s not available") % host)
-
-            # TODO(vish): this probably belongs in the manager, if we
-            #             can generalize this somehow
-            now = utils.utcnow()
-            db.volume_update(context, volume_id, {'host': host,
-                    'scheduled_at': now})
-            return host
+            driver.cast_to_volume_host(context, host, 'create_volume',
+                    volume_id=volume_id, **_kwargs)
+            return None
+
         results = db.service_get_all_volume_sorted(context)
         for result in results:
             (service, volume_gigabytes) = result
@@ -106,14 +105,9 @@ class SimpleScheduler(chance.ChanceScheduler):
                 raise driver.NoValidHost(_("All hosts have too many "
                         "gigabytes"))
             if self.service_is_up(service):
-                # NOTE(vish): this probably belongs in the manager, if we
-                #             can generalize this somehow
-                now = utils.utcnow()
-                db.volume_update(context,
-                        volume_id,
-                        {'host': service['host'],
-                         'scheduled_at': now})
-                return service['host']
+                driver.cast_to_volume_host(context, service['host'],
+                        'create_volume', volume_id=volume_id, **_kwargs)
+                return None
         raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                 " for this request. Is the appropriate"
                 " service running?"))
@@ -127,7 +121,9 @@ class SimpleScheduler(chance.ChanceScheduler):
             if instance_count >= FLAGS.max_networks:
                 raise driver.NoValidHost(_("All hosts have too many networks"))
             if self.service_is_up(service):
-                return service['host']
+                driver.cast_to_network_host(context, service['host'],
+                        'set_network_host', **_kwargs)
+                return None
         raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                 " for this request. Is the appropriate"
                 " service running?"))


@@ -195,8 +195,6 @@ class VsaScheduler(simple.SimpleScheduler):
             'display_description': vol['description'],
             'volume_type_id': vol['volume_type_id'],
             'metadata': dict(to_vsa_id=vsa_id),
-            'host': vol['host'],
-            'scheduled_at': now
             }

         size = vol['size']
@@ -205,12 +203,10 @@ class VsaScheduler(simple.SimpleScheduler):
         LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
                     "host %(host)s"), locals())

-        volume_ref = db.volume_create(context, options)
-        rpc.cast(context,
-                 db.queue_get_for(context, "volume", vol['host']),
-                 {"method": "create_volume",
-                  "args": {"volume_id": volume_ref['id'],
-                           "snapshot_id": None}})
+        volume_ref = db.volume_create(context.elevated(), options)
+        driver.cast_to_volume_host(context, vol['host'],
+                'create_volume', volume_id=volume_ref['id'],
+                snapshot_id=None)

     def _check_host_enforcement(self, context, availability_zone):
         if (availability_zone
@@ -274,7 +270,6 @@ class VsaScheduler(simple.SimpleScheduler):
     def schedule_create_volumes(self, context, request_spec,
                                 availability_zone=None, *_args, **_kwargs):
         """Picks hosts for hosting multiple volumes."""
         num_volumes = request_spec.get('num_volumes')
         LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
                 locals())
@@ -291,7 +286,8 @@ class VsaScheduler(simple.SimpleScheduler):
             for vol in volume_params:
                 self._provision_volume(context, vol, vsa_id, availability_zone)
-        except:
+        except Exception:
+            LOG.exception(_("Error creating volumes"))
             if vsa_id:
                 db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))
@@ -310,10 +306,9 @@ class VsaScheduler(simple.SimpleScheduler):
         host = self._check_host_enforcement(context,
                                             volume_ref['availability_zone'])
         if host:
-            now = utils.utcnow()
-            db.volume_update(context, volume_id, {'host': host,
-                                                  'scheduled_at': now})
-            return host
+            driver.cast_to_volume_host(context, host, 'create_volume',
+                    volume_id=volume_id, **_kwargs)
+            return None

         volume_type_id = volume_ref['volume_type_id']
         if volume_type_id:
@@ -344,18 +339,16 @@ class VsaScheduler(simple.SimpleScheduler):
         try:
             (host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
-        except:
+        except Exception:
+            LOG.exception(_("Error creating volume"))
             if volume_ref['to_vsa_id']:
                 db.vsa_update(context, volume_ref['to_vsa_id'],
                               dict(status=VsaState.FAILED))
             raise

         if host:
-            now = utils.utcnow()
-            db.volume_update(context, volume_id, {'host': host,
-                    'scheduled_at': now})
-            self._consume_resource(qos_cap, volume_ref['size'], -1)
-            return host
+            driver.cast_to_volume_host(context, host, 'create_volume',
+                    volume_id=volume_id, **_kwargs)

     def _consume_full_drive(self, qos_values, direction):
         qos_values['FullDrive']['NumFreeDrives'] += direction


@@ -35,7 +35,7 @@ class ZoneScheduler(driver.Scheduler):
         for topic and availability zone (if defined).
         """

-        if zone is None:
+        if not zone:
             return self.hosts_up(context, topic)

         services = db.service_get_all_by_topic(context, topic)
@@ -44,16 +44,34 @@ class ZoneScheduler(driver.Scheduler):
                 if self.service_is_up(service)
                 and service.availability_zone == zone]

-    def schedule(self, context, topic, *_args, **_kwargs):
+    def _schedule(self, context, topic, request_spec, **kwargs):
         """Picks a host that is up at random in selected
         availability zone (if defined).
         """

-        zone = _kwargs.get('availability_zone')
-        hosts = self.hosts_up_with_zone(context, topic, zone)
+        zone = kwargs.get('availability_zone')
+        if not zone and request_spec:
+            zone = request_spec['instance_properties'].get(
+                    'availability_zone')
+        hosts = self.hosts_up_with_zone(context.elevated(), topic, zone)
         if not hosts:
             raise driver.NoValidHost(_("Scheduler was unable to locate a host"
                     " for this request. Is the appropriate"
                     " service running?"))
         return hosts[int(random.random() * len(hosts))]
+
+    def schedule(self, context, topic, method, *_args, **kwargs):
+        host = self._schedule(context, topic, None, **kwargs)
+        driver.cast_to_host(context, topic, host, method, **kwargs)
+
+    def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
+        """Builds and starts instances on selected hosts"""
+        num_instances = request_spec.get('num_instances', 1)
+        instances = []
+        for num in xrange(num_instances):
+            host = self._schedule(context, 'compute', request_spec, **kwargs)
+            instance = self.create_instance_db_entry(context, request_spec)
+            driver.cast_to_compute_host(context, host,
+                    'run_instance', instance_id=instance['id'], **kwargs)
+            instances.append(driver.encode_instance(instance))
+        return instances


@@ -20,6 +20,7 @@ import json
 import nova.db

+from nova import context
 from nova import exception
 from nova import rpc
 from nova import test
@@ -102,7 +103,7 @@ def fake_empty_call_zone_method(context, method, specs, zones):
 was_called = False

-def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
+def fake_provision_resource(context, item, request_spec, kwargs):
     global was_called
     was_called = True
@@ -118,8 +119,7 @@ def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
     was_called = True

-def fake_provision_resource_from_blob(context, item, instance_id,
-        request_spec, kwargs):
+def fake_provision_resource_from_blob(context, item, request_spec, kwargs):
     global was_called
     was_called = True
@@ -185,7 +185,7 @@ class AbstractSchedulerTestCase(test.TestCase):
         zm = FakeZoneManager()
         sched.set_zone_manager(zm)

-        fake_context = {}
+        fake_context = context.RequestContext('user', 'project')
         build_plan = sched.select(fake_context,
                 {'instance_type': {'memory_mb': 512},
                  'num_instances': 4})
@@ -229,9 +229,10 @@ class AbstractSchedulerTestCase(test.TestCase):
         zm = FakeEmptyZoneManager()
         sched.set_zone_manager(zm)

-        fake_context = {}
+        fake_context = context.RequestContext('user', 'project')
+        request_spec = {}
         self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
-                fake_context, 1,
+                fake_context, request_spec,
                 dict(host_filter=None, instance_type={}))

     def test_schedule_do_not_schedule_with_hint(self):
@@ -250,8 +251,8 @@ class AbstractSchedulerTestCase(test.TestCase):
             'blob': "Non-None blob data",
         }

-        result = sched.schedule_run_instance(None, 1, request_spec)
-        self.assertEquals(None, result)
+        instances = sched.schedule_run_instance(None, request_spec)
+        self.assertTrue(instances)
         self.assertTrue(was_called)

     def test_provision_resource_local(self):
@@ -263,7 +264,7 @@ class AbstractSchedulerTestCase(test.TestCase):
                 fake_provision_resource_locally)

         request_spec = {'hostname': "foo"}
-        sched._provision_resource(None, request_spec, 1, request_spec, {})
+        sched._provision_resource(None, request_spec, request_spec, {})
         self.assertTrue(was_called)

     def test_provision_resource_remote(self):
@@ -275,7 +276,7 @@ class AbstractSchedulerTestCase(test.TestCase):
                 fake_provision_resource_from_blob)

         request_spec = {}
-        sched._provision_resource(None, request_spec, 1, request_spec, {})
+        sched._provision_resource(None, request_spec, request_spec, {})
         self.assertTrue(was_called)

     def test_provision_resource_from_blob_empty(self):
@@ -285,7 +286,7 @@ class AbstractSchedulerTestCase(test.TestCase):
         request_spec = {}
         self.assertRaises(abstract_scheduler.InvalidBlob,
                           sched._provision_resource_from_blob,
-                          None, {}, 1, {}, {})
+                          None, {}, {}, {})

     def test_provision_resource_from_blob_with_local_blob(self):
         """
@@ -303,20 +304,21 @@ class AbstractSchedulerTestCase(test.TestCase):
             # return fake instances
             return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'}

-        def fake_rpc_cast(*args, **kwargs):
+        def fake_cast_to_compute_host(*args, **kwargs):
             pass

         self.stubs.Set(sched, '_decrypt_blob',
                 fake_decrypt_blob_returns_local_info)
+        self.stubs.Set(driver, 'cast_to_compute_host',
+                fake_cast_to_compute_host)
         self.stubs.Set(compute_api.API,
                 'create_db_entry_for_new_instance',
                 fake_create_db_entry_for_new_instance)
-        self.stubs.Set(rpc, 'cast', fake_rpc_cast)

         build_plan_item = {'blob': "Non-None blob data"}
         request_spec = {'image': {}, 'instance_properties': {}}

-        sched._provision_resource_from_blob(None, build_plan_item, 1,
+        sched._provision_resource_from_blob(None, build_plan_item,
                 request_spec, {})
         self.assertTrue(was_called)
@@ -335,7 +337,7 @@ class AbstractSchedulerTestCase(test.TestCase):

         request_spec = {'blob': "Non-None blob data"}

-        sched._provision_resource_from_blob(None, request_spec, 1,
+        sched._provision_resource_from_blob(None, request_spec,
                 request_spec, {})
         self.assertTrue(was_called)
@@ -352,7 +354,7 @@ class AbstractSchedulerTestCase(test.TestCase):

         request_spec = {'child_blob': True, 'child_zone': True}

-        sched._provision_resource_from_blob(None, request_spec, 1,
+        sched._provision_resource_from_blob(None, request_spec,
                 request_spec, {})
         self.assertTrue(was_called)
@@ -386,7 +388,7 @@ class AbstractSchedulerTestCase(test.TestCase):
         zm.service_states = {}
         sched.set_zone_manager(zm)

-        fake_context = {}
+        fake_context = context.RequestContext('user', 'project')
         build_plan = sched.select(fake_context,
                 {'instance_type': {'memory_mb': 512},
                  'num_instances': 4})
@@ -394,6 +396,45 @@ class AbstractSchedulerTestCase(test.TestCase):
         # 0 from local zones, 12 from remotes
         self.assertEqual(12, len(build_plan))

+    def test_run_instance_non_admin(self):
+        """Test creating an instance locally using run_instance, passing
+        a non-admin context.  DB actions should work."""
+        sched = FakeAbstractScheduler()
+
+        def fake_cast_to_compute_host(*args, **kwargs):
+            pass
+
+        def fake_zone_get_all_zero(context):
+            # make sure this is called with admin context, even though
+            # we're using user context below
+            self.assertTrue(context.is_admin)
+            return []
+
+        self.stubs.Set(driver, 'cast_to_compute_host',
+                fake_cast_to_compute_host)
+        self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+        self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all_zero)
+
+        zm = FakeZoneManager()
+        sched.set_zone_manager(zm)
+
+        fake_context = context.RequestContext('user', 'project')
+        request_spec = {
+            'image': {'properties': {}},
+            'security_group': [],
+            'instance_properties': {
+                'project_id': fake_context.project_id,
+                'user_id': fake_context.user_id},
+            'instance_type': {'memory_mb': 256},
+            'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter'
+        }
+
+        instances = sched.schedule_run_instance(fake_context, request_spec)
+        self.assertEqual(len(instances), 1)
+        self.assertFalse(instances[0].get('_is_precooked', False))
+        nova.db.instance_destroy(fake_context, instances[0]['id'])
+

 class BaseSchedulerTestCase(test.TestCase):
     """Test case for Base Scheduler."""


@@ -134,7 +134,7 @@ class LeastCostSchedulerTestCase(test.TestCase):

         expected = []
         for idx, (hostname, services) in enumerate(hosts):
-            caps = copy.deepcopy(services["compute"])
+            caps = copy.deepcopy(services)
             # Costs are normalized so over 10 hosts, each host with increasing
             # free ram will cost 1/N more. Since the lowest cost host has some
             # free ram, we add in the 1/N for the base_cost


@@ -22,6 +22,7 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova import rpc
 from nova import test
 from nova import utils
 from nova.volume import volume_types
@@ -37,6 +38,10 @@ scheduled_volume = {}
 global_volume = {}

+def fake_rpc_cast(*args, **kwargs):
+    pass
+
+
 class FakeVsaLeastUsedScheduler(
         vsa_sched.VsaSchedulerLeastUsedHost):
     # No need to stub anything at the moment
@@ -170,12 +175,10 @@ class VsaSchedulerTestCase(test.TestCase):
         LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
                 locals())
         LOG.debug(_("\t vol=%(vol)s"), locals())
-        pass

     def _fake_vsa_update(self, context, vsa_id, values):
         LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
                 "values=%(values)s"), locals())
-        pass

     def _fake_volume_create(self, context, options):
         LOG.debug(_("Test: Volume create: %s"), options)
@@ -196,7 +199,6 @@ class VsaSchedulerTestCase(test.TestCase):
                     "values=%(values)s"), locals())
         global scheduled_volume
         scheduled_volume = {'id': volume_id, 'host': values['host']}
-        pass

     def _fake_service_get_by_args(self, context, host, binary):
         return "service"
@@ -209,7 +211,6 @@ class VsaSchedulerTestCase(test.TestCase):

     def setUp(self, sched_class=None):
         super(VsaSchedulerTestCase, self).setUp()
-        self.stubs = stubout.StubOutForTesting()
         self.context = context.get_admin_context()

         if sched_class is None:
@@ -220,6 +221,7 @@ class VsaSchedulerTestCase(test.TestCase):
         self.host_num = 10
         self.drive_type_num = 5

+        self.stubs.Set(rpc, 'cast', fake_rpc_cast)
         self.stubs.Set(self.sched,
                 '_get_service_states', self._fake_get_service_states)
         self.stubs.Set(self.sched,
@@ -234,8 +236,6 @@ class VsaSchedulerTestCase(test.TestCase):
     def tearDown(self):
         for name in self.created_types_lst:
             volume_types.purge(self.context, name)
-
-        self.stubs.UnsetAll()
         super(VsaSchedulerTestCase, self).tearDown()

     def test_vsa_sched_create_volumes_simple(self):
@@ -333,6 +333,8 @@ class VsaSchedulerTestCase(test.TestCase):
         self.stubs.Set(self.sched,
                 '_get_service_states', self._fake_get_service_states)
         self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
+        self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
+        self.stubs.Set(rpc, 'cast', fake_rpc_cast)

         self.sched.schedule_create_volumes(self.context,
                                            request_spec,
@@ -467,10 +469,9 @@ class VsaSchedulerTestCase(test.TestCase):
         self.stubs.Set(self.sched,
                 'service_is_up', self._fake_service_is_up_True)

-        host = self.sched.schedule_create_volume(self.context,
+        self.sched.schedule_create_volume(self.context,
                 123, availability_zone=None)

-        self.assertEqual(host, 'host_3')
         self.assertEqual(scheduled_volume['id'], 123)
         self.assertEqual(scheduled_volume['host'], 'host_3')
@@ -514,10 +515,9 @@ class VsaSchedulerTestCase(test.TestCase):
         global_volume['volume_type_id'] = volume_type['id']
         global_volume['size'] = 0

-        host = self.sched.schedule_create_volume(self.context,
+        self.sched.schedule_create_volume(self.context,
                 123, availability_zone=None)

-        self.assertEqual(host, 'host_2')
         self.assertEqual(scheduled_volume['id'], 123)
         self.assertEqual(scheduled_volume['host'], 'host_2')
@@ -529,7 +529,6 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
                   FakeVsaMostAvailCapacityScheduler())

     def tearDown(self):
-        self.stubs.UnsetAll()
         super(VsaSchedulerTestCaseMostAvail, self).tearDown()

     def test_vsa_sched_create_single_volume(self):
@@ -558,10 +557,9 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
         global_volume['volume_type_id'] = volume_type['id']
         global_volume['size'] = 0

-        host = self.sched.schedule_create_volume(self.context,
+        self.sched.schedule_create_volume(self.context,
                 123, availability_zone=None)

-        self.assertEqual(host, 'host_9')
         self.assertEqual(scheduled_volume['id'], 123)
         self.assertEqual(scheduled_volume['host'], 'host_9')


@@ -28,6 +28,7 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova.scheduler import driver as scheduler_driver
 from nova import rpc
 from nova import test
 from nova import utils
@@ -56,6 +57,38 @@ class FakeTime(object):
         self.counter += t

+orig_rpc_call = rpc.call
+orig_rpc_cast = rpc.cast
+
+
+def rpc_call_wrapper(context, topic, msg, do_cast=True):
+    """Stub out the scheduler creating the instance entry"""
+    if topic == FLAGS.scheduler_topic and \
+            msg['method'] == 'run_instance':
+        request_spec = msg['args']['request_spec']
+        scheduler = scheduler_driver.Scheduler
+        num_instances = request_spec.get('num_instances', 1)
+        instances = []
+        for x in xrange(num_instances):
+            instance = scheduler().create_instance_db_entry(
+                    context, request_spec)
+            encoded = scheduler_driver.encode_instance(instance)
+            instances.append(encoded)
+        return instances
+    else:
+        if do_cast:
+            orig_rpc_cast(context, topic, msg)
+        else:
+            return orig_rpc_call(context, topic, msg)
+
+
+def rpc_cast_wrapper(context, topic, msg):
+    """Stub out the scheduler creating the instance entry in
+    the reservation_id case.
+    """
+    rpc_call_wrapper(context, topic, msg, do_cast=True)
+
+
 def nop_report_driver_status(self):
     pass
@@ -80,6 +113,8 @@ class ComputeTestCase(test.TestCase):
                     'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

         self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
+        self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)

     def _create_instance(self, params=None):
         """Create a test instance"""
@@ -142,7 +177,7 @@ class ComputeTestCase(test.TestCase):
         """Verify that an instance cannot be created without a display_name."""
         cases = [dict(), dict(display_name=None)]
         for instance in cases:
-            ref = self.compute_api.create(self.context,
+            (ref, resv_id) = self.compute_api.create(self.context,
                 instance_types.get_default_instance_type(), None, **instance)
             try:
                 self.assertNotEqual(ref[0]['display_name'], None)
@@ -152,7 +187,7 @@ class ComputeTestCase(test.TestCase):
     def test_create_instance_associates_security_groups(self):
         """Make sure create associates security groups"""
         group = self._create_group()
-        ref = self.compute_api.create(
+        (ref, resv_id) = self.compute_api.create(
                 self.context,
                 instance_type=instance_types.get_default_instance_type(),
                 image_href=None,
@@ -212,7 +247,7 @@ class ComputeTestCase(test.TestCase):
                 ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
                 ('hello_server', 'hello-server')]
         for display_name, hostname in cases:
-            ref = self.compute_api.create(self.context,
+            (ref, resv_id) = self.compute_api.create(self.context,
                 instance_types.get_default_instance_type(), None,
                 display_name=display_name)
             try:
@@ -224,7 +259,7 @@ class ComputeTestCase(test.TestCase):
         """Make sure destroying disassociates security groups"""
         group = self._create_group()

-        ref = self.compute_api.create(
+        (ref, resv_id) = self.compute_api.create(
                 self.context,
                 instance_type=instance_types.get_default_instance_type(),
                 image_href=None,
@@ -240,7 +275,7 @@ class ComputeTestCase(test.TestCase):
         """Make sure destroying security groups disassociates instances"""
         group = self._create_group()

-        ref = self.compute_api.create(
+        (ref, resv_id) = self.compute_api.create(
                 self.context,
                 instance_type=instance_types.get_default_instance_type(),
                 image_href=None,
@@ -1398,6 +1433,84 @@ class ComputeTestCase(test.TestCase):
                                                  'swap'),
                          swap_size)

+    def test_reservation_id_one_instance(self):
+        """Verify building an instance has a reservation_id that
+        matches return value from create"""
+        (refs, resv_id) = self.compute_api.create(self.context,
+                instance_types.get_default_instance_type(), None)
+        try:
+            self.assertEqual(len(refs), 1)
+            self.assertEqual(refs[0]['reservation_id'], resv_id)
+        finally:
+            db.instance_destroy(self.context, refs[0]['id'])
+
+    def test_reservation_ids_two_instances(self):
+        """Verify building 2 instances at once results in a
+        reservation_id being returned equal to reservation id set
+        in both instances
+        """
+        (refs, resv_id) = self.compute_api.create(self.context,
+                instance_types.get_default_instance_type(), None,
+                min_count=2, max_count=2)
+        try:
+            self.assertEqual(len(refs), 2)
+            self.assertNotEqual(resv_id, None)
+        finally:
+            for instance in refs:
+                self.assertEqual(instance['reservation_id'], resv_id)
+                db.instance_destroy(self.context, instance['id'])
+
+    def test_reservation_ids_two_instances_no_wait(self):
+        """Verify building 2 instances at once without waiting for
+        instance IDs results in a reservation_id being returned equal
+        to reservation id set in both instances
+        """
+        (refs, resv_id) = self.compute_api.create(self.context,
+                instance_types.get_default_instance_type(), None,
+                min_count=2, max_count=2, wait_for_instances=False)
+        try:
+            self.assertEqual(refs, None)
+            self.assertNotEqual(resv_id, None)
+        finally:
+            instances = self.compute_api.get_all(self.context,
+                    search_opts={'reservation_id': resv_id})
+            self.assertEqual(len(instances), 2)
+            for instance in instances:
+                self.assertEqual(instance['reservation_id'], resv_id)
+                db.instance_destroy(self.context, instance['id'])
+
+    def test_create_with_specified_reservation_id(self):
+        """Verify building instances with a specified
+        reservation_id results in the correct reservation_id
+        being set
+        """
+        # We need admin context to be able to specify our own
+        # reservation_ids.
+        context = self.context.elevated()
+        # 1 instance
+        (refs, resv_id) = self.compute_api.create(context,
+                instance_types.get_default_instance_type(), None,
+                min_count=1, max_count=1, reservation_id='meow')
+        try:
+            self.assertEqual(len(refs), 1)
+            self.assertEqual(resv_id, 'meow')
+        finally:
+            self.assertEqual(refs[0]['reservation_id'], resv_id)
+            db.instance_destroy(self.context, refs[0]['id'])
+
+        # 2 instances
+        (refs, resv_id) = self.compute_api.create(context,
+                instance_types.get_default_instance_type(), None,
+                min_count=2, max_count=2, reservation_id='woof')
+        try:
+            self.assertEqual(len(refs), 2)
+            self.assertEqual(resv_id, 'woof')
+        finally:
+            for instance in refs:
+                self.assertEqual(instance['reservation_id'], resv_id)
+                db.instance_destroy(self.context, instance['id'])
+

 class ComputeTestMinRamMinDisk(test.TestCase):
     def setUp(self):
@@ -1405,6 +1518,8 @@ class ComputeTestMinRamMinDisk(test.TestCase):
         self.compute = utils.import_object(FLAGS.compute_manager)
         self.compute_api = compute.API()
         self.context = context.RequestContext('fake', 'fake')
+        self.stubs.Set(rpc, 'call', rpc_call_wrapper)
+        self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)

         self.fake_image = {
             'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -1425,10 +1540,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

         # Now increase the inst_type memory and make sure all is fine.
         inst_type['memory_mb'] = 2
-        ref = self.compute_api.create(self.context, inst_type, None)
-        self.assertTrue(ref)
-
-        db.instance_destroy(self.context, ref[0]['id'])
+        (refs, resv_id) = self.compute_api.create(self.context,
+                inst_type, None)
+        db.instance_destroy(self.context, refs[0]['id'])

     def test_create_with_too_little_disk(self):
         """Test an instance type with too little disk space"""
@@ -1447,10 +1561,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

         # Now increase the inst_type disk space and make sure all is fine.
         inst_type['local_gb'] = 2
-        ref = self.compute_api.create(self.context, inst_type, None)
-        self.assertTrue(ref)
-
-        db.instance_destroy(self.context, ref[0]['id'])
+        (refs, resv_id) = self.compute_api.create(self.context,
+                inst_type, None)
+        db.instance_destroy(self.context, refs[0]['id'])

     def test_create_just_enough_ram_and_disk(self):
         """Test an instance type with just enough ram and disk space"""
@@ -1466,10 +1579,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):
             return img

         self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-        ref = self.compute_api.create(self.context, inst_type, None)
-        self.assertTrue(ref)
-
-        db.instance_destroy(self.context, ref[0]['id'])
+        (refs, resv_id) = self.compute_api.create(self.context,
+                inst_type, None)
+        db.instance_destroy(self.context, refs[0]['id'])

     def test_create_with_no_ram_and_disk_reqs(self):
         """Test an instance type with no min_ram or min_disk"""
@@ -1482,7 +1594,6 @@ class ComputeTestMinRamMinDisk(test.TestCase):
             return copy(self.fake_image)

         self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
-        ref = self.compute_api.create(self.context, inst_type, None)
-        self.assertTrue(ref)
-
-        db.instance_destroy(self.context, ref[0]['id'])
+        (refs, resv_id) = self.compute_api.create(self.context,
+                inst_type, None)
+        db.instance_destroy(self.context, refs[0]['id'])