Merge "compute_api create*() and schedulers refactoring"
@@ -432,3 +432,6 @@ DEFINE_list('monkey_patch_modules',
DEFINE_bool('allow_resize_to_same_host', False,
'Allow destination machine to match source for resize. Useful'
' when testing in environments with only one host machine.')

DEFINE_string('stub_network', False,
'Stub network related code')

@@ -60,24 +60,10 @@ class AbstractScheduler(driver.Scheduler):
request_spec, kwargs):
"""Create the requested resource in this Zone."""
host = build_plan_item['hostname']
base_options = request_spec['instance_properties']
image = request_spec['image']
instance_type = request_spec.get('instance_type')

# TODO(sandy): I guess someone needs to add block_device_mapping
# support at some point? Also, OS API has no concept of security
# groups.
instance = compute_api.API().create_db_entry_for_new_instance(context,
instance_type, image, base_options, None, [])

instance_id = instance['id']
kwargs['instance_id'] = instance_id

queue = db.queue_get_for(context, "compute", host)
params = {"method": "run_instance", "args": kwargs}
rpc.cast(context, queue, params)
LOG.debug(_("Provisioning locally via compute node %(host)s")
% locals())
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, host,
'run_instance', instance_id=instance['id'], **kwargs)
return driver.encode_instance(instance, local=True)

def _decrypt_blob(self, blob):
"""Returns the decrypted blob or None if invalid. Broken out
@@ -112,7 +98,7 @@ class AbstractScheduler(driver.Scheduler):
files = kwargs['injected_files']
child_zone = zone_info['child_zone']
child_blob = zone_info['child_blob']
zone = db.zone_get(context, child_zone)
zone = db.zone_get(context.elevated(), child_zone)
url = zone.api_url
LOG.debug(_("Forwarding instance create call to child zone %(url)s"
". ReservationID=%(reservation_id)s") % locals())
@@ -132,12 +118,13 @@ class AbstractScheduler(driver.Scheduler):
# arguments are passed as keyword arguments
# (there's a reasonable default for ipgroups in the
# novaclient call).
nova.servers.create(name, image_ref, flavor_id,
instance = nova.servers.create(name, image_ref, flavor_id,
meta=meta, files=files, zone_blob=child_blob,
reservation_id=reservation_id)
return driver.encode_instance(instance._info, local=False)

def _provision_resource_from_blob(self, context, build_plan_item,
instance_id, request_spec, kwargs):
request_spec, kwargs):
"""Create the requested resource locally or in a child zone
based on what is stored in the zone blob info.

@@ -165,21 +152,21 @@ class AbstractScheduler(driver.Scheduler):

# Valid data ... is it for us?
if 'child_zone' in host_info and 'child_blob' in host_info:
self._ask_child_zone_to_create_instance(context, host_info,
request_spec, kwargs)
instance = self._ask_child_zone_to_create_instance(context,
host_info, request_spec, kwargs)
else:
self._provision_resource_locally(context, host_info, request_spec,
kwargs)
instance = self._provision_resource_locally(context,
host_info, request_spec, kwargs)
return instance

def _provision_resource(self, context, build_plan_item, instance_id,
def _provision_resource(self, context, build_plan_item,
request_spec, kwargs):
"""Create the requested resource in this Zone or a child zone."""
if "hostname" in build_plan_item:
self._provision_resource_locally(context, build_plan_item,
request_spec, kwargs)
return
self._provision_resource_from_blob(context, build_plan_item,
instance_id, request_spec, kwargs)
return self._provision_resource_locally(context,
build_plan_item, request_spec, kwargs)
return self._provision_resource_from_blob(context,
build_plan_item, request_spec, kwargs)

def _adjust_child_weights(self, child_results, zones):
"""Apply the Scale and Offset values from the Zone definition
@@ -205,8 +192,7 @@ class AbstractScheduler(driver.Scheduler):
LOG.exception(_("Bad child zone scaling values "
"for Zone: %(zone_id)s") % locals())

def schedule_run_instance(self, context, instance_id, request_spec,
*args, **kwargs):
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
"""This method is called from nova.compute.api to provision
an instance. However we need to look at the parameters being
passed in to see if this is a request to:
@@ -214,13 +200,16 @@ class AbstractScheduler(driver.Scheduler):
2. Use the Build Plan information in the request parameters
to simply create the instance (either in this zone or
a child zone).

returns list of instances created.
"""
# TODO(sandy): We'll have to look for richer specs at some point.
blob = request_spec.get('blob')
if blob:
self._provision_resource(context, request_spec, instance_id,
request_spec, kwargs)
return None
instance = self._provision_resource(context,
request_spec, request_spec, kwargs)
# Caller expects a list of instances
return [instance]

num_instances = request_spec.get('num_instances', 1)
LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
@@ -231,16 +220,16 @@ class AbstractScheduler(driver.Scheduler):
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))

instances = []
for num in xrange(num_instances):
if not build_plan:
break
build_plan_item = build_plan.pop(0)
self._provision_resource(context, build_plan_item, instance_id,
request_spec, kwargs)
instance = self._provision_resource(context,
build_plan_item, request_spec, kwargs)
instances.append(instance)

# Returning None short-circuits the routing to Compute (since
# we've already done it here)
return None
return instances
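
The hunk above changes the scheduler contract: schedule_run_instance() no longer receives a pre-created instance_id; the scheduler now creates the database entry itself, casts run_instance to the chosen host, and returns the list of instances it built. A rough, self-contained sketch of that flow follows, assuming simplified stand-ins for the real helpers (create_instance_db_entry, cast_to_compute_host and encode_instance below are toy fakes, not the nova.scheduler.driver implementations):

# Sketch only: stand-ins for the Nova helpers touched in the hunk above.
def create_instance_db_entry(context, request_spec):
    # In Nova this inserts the instance row; here we just fake an id.
    return {'id': 1,
            'properties': request_spec.get('instance_properties', {})}


def cast_to_compute_host(context, host, method, **kwargs):
    # In Nova this is an async rpc.cast to the compute service on `host`.
    print("cast %s to %s with %s" % (method, host, kwargs))


def encode_instance(instance, local=True):
    # In Nova this marks whether the dict came from the local DB or a child zone.
    instance['_is_precooked'] = not local
    return instance


def schedule_run_instance(context, request_spec, build_plan):
    """Mirror the new contract: build each instance, cast, return the list."""
    instances = []
    for build_plan_item in build_plan:
        host = build_plan_item['hostname']
        instance = create_instance_db_entry(context, request_spec)
        cast_to_compute_host(context, host, 'run_instance',
                             instance_id=instance['id'])
        instances.append(encode_instance(instance, local=True))
    return instances


print(schedule_run_instance(None, {'instance_properties': {}},
                            [{'hostname': 'host1'}]))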

def select(self, context, request_spec, *args, **kwargs):
"""Select returns a list of weights and zone/host information
@@ -251,7 +240,7 @@ class AbstractScheduler(driver.Scheduler):
return self._schedule(context, "compute", request_spec,
*args, **kwargs)

def schedule(self, context, topic, request_spec, *args, **kwargs):
def schedule(self, context, topic, method, *args, **kwargs):
"""The schedule() contract requires we return the one
best-suited host for this request.
"""
@@ -285,7 +274,7 @@ class AbstractScheduler(driver.Scheduler):
weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts)
# Next, tack on the host weights from the child zones
json_spec = json.dumps(request_spec)
all_zones = db.zone_get_all(context)
all_zones = db.zone_get_all(context.elevated())
child_results = self._call_zone_method(context, "select",
specs=json_spec, zones=all_zones)
self._adjust_child_weights(child_results, all_zones)

@@ -65,7 +65,7 @@ def get_zone_list(context):
for item in items:
item['api_url'] = item['api_url'].replace('\\/', '/')
if not items:
items = db.zone_get_all(context)
items = db.zone_get_all(context.elevated())
return items


@@ -116,7 +116,7 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
pool = greenpool.GreenPool()
results = []
if zones is None:
zones = db.zone_get_all(context)
zones = db.zone_get_all(context.elevated())
for zone in zones:
try:
# Do this on behalf of the user ...

@@ -160,8 +160,7 @@ class LeastCostScheduler(base_scheduler.BaseScheduler):

weighted = []
weight_log = []
for cost, (hostname, service) in zip(costs, hosts):
caps = service[topic]
for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
weight_dict = dict(weight=cost, hostname=hostname,
capabilities=caps)

@@ -38,7 +38,8 @@ flags.DEFINE_string('volume_scheduler_driver',
# A mapping of methods to topics so we can figure out which driver to use.
_METHOD_MAP = {'run_instance': 'compute',
'start_instance': 'compute',
'create_volume': 'volume'}
'create_volume': 'volume',
'create_volumes': 'volume'}


class MultiScheduler(driver.Scheduler):
@@ -69,5 +70,6 @@ class MultiScheduler(driver.Scheduler):
for k, v in self.drivers.iteritems():
v.set_zone_manager(zone_manager)

def schedule(self, context, topic, *_args, **_kwargs):
return self.drivers[topic].schedule(context, topic, *_args, **_kwargs)
def schedule(self, context, topic, method, *_args, **_kwargs):
return self.drivers[topic].schedule(context, topic,
method, *_args, **_kwargs)
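
For context on the MultiScheduler hunks above: _METHOD_MAP lets the scheduler service work out which topic (and therefore which per-topic driver) owns an RPC method, and the added 'create_volumes' entry routes the new VSA call to the volume scheduler. A toy sketch of that routing idea, collapsing the map lookup and the per-topic delegation into one function (FakeScheduler and the drivers dict are invented placeholders, not Nova classes):

# Rough sketch of method -> topic -> driver dispatch; all names here are fakes.
_METHOD_MAP = {'run_instance': 'compute',
               'start_instance': 'compute',
               'create_volume': 'volume',
               'create_volumes': 'volume'}


class FakeScheduler(object):
    def __init__(self, name):
        self.name = name

    def schedule(self, context, topic, method, *args, **kwargs):
        return "%s handled %s" % (self.name, method)


drivers = {'compute': FakeScheduler('compute-sched'),
           'volume': FakeScheduler('volume-sched')}


def schedule(context, method, *args, **kwargs):
    # Look up which topic owns the method, then delegate to that topic's driver.
    topic = _METHOD_MAP[method]
    return drivers[topic].schedule(context, topic, method, *args, **kwargs)


print(schedule(None, 'create_volumes'))   # volume-sched handled create_volumes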

@@ -39,47 +39,50 @@ flags.DEFINE_integer("max_networks", 1000,
class SimpleScheduler(chance.ChanceScheduler):
"""Implements Naive Scheduler that tries to find least loaded host."""

def _schedule_instance(self, context, instance_id, *_args, **_kwargs):
def _schedule_instance(self, context, instance_opts, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if (instance_ref['availability_zone']
and ':' in instance_ref['availability_zone']
and context.is_admin):
zone, _x, host = instance_ref['availability_zone'].partition(':')

availability_zone = instance_opts.get('availability_zone')

if availability_zone and context.is_admin and \
(':' in availability_zone):
zone, host = availability_zone.split(':', 1)
service = db.service_get_by_args(context.elevated(), host,
'nova-compute')
if not self.service_is_up(service):
raise driver.WillNotSchedule(_("Host %s is not alive") % host)

# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.instance_update(context, instance_id, {'host': host,
'scheduled_at': now})
return host

results = db.service_get_all_compute_sorted(context)
for result in results:
(service, instance_cores) = result
if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
if instance_cores + instance_opts['vcpus'] > FLAGS.max_cores:
raise driver.NoValidHost(_("All hosts have too many cores"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.instance_update(context,
instance_id,
{'host': service['host'],
'scheduled_at': now})
return service['host']
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))

def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
def schedule_run_instance(self, context, request_spec, *_args, **_kwargs):
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
host = self._schedule_instance(context,
request_spec['instance_properties'], *_args, **_kwargs)
instance_ref = self.create_instance_db_entry(context,
request_spec)
driver.cast_to_compute_host(context, host, 'run_instance',
instance_id=instance_ref['id'], **_kwargs)
instances.append(driver.encode_instance(instance_ref))
return instances

def schedule_start_instance(self, context, instance_id, *_args, **_kwargs):
return self._schedule_instance(context, instance_id, *_args, **_kwargs)
instance_ref = db.instance_get(context, instance_id)
host = self._schedule_instance(context, instance_ref,
*_args, **_kwargs)
driver.cast_to_compute_host(context, host, 'start_instance',
instance_id=instance_id, **_kwargs)

def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
@@ -92,13 +95,9 @@ class SimpleScheduler(chance.ChanceScheduler):
'nova-volume')
if not self.service_is_up(service):
raise driver.WillNotSchedule(_("Host %s not available") % host)

# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
return host
driver.cast_to_volume_host(context, host, 'create_volume',
volume_id=volume_id, **_kwargs)
return None
results = db.service_get_all_volume_sorted(context)
for result in results:
(service, volume_gigabytes) = result
@@ -106,14 +105,9 @@ class SimpleScheduler(chance.ChanceScheduler):
raise driver.NoValidHost(_("All hosts have too many "
"gigabytes"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = utils.utcnow()
db.volume_update(context,
volume_id,
{'host': service['host'],
'scheduled_at': now})
return service['host']
driver.cast_to_volume_host(context, service['host'],
'create_volume', volume_id=volume_id, **_kwargs)
return None
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))
@@ -127,7 +121,9 @@ class SimpleScheduler(chance.ChanceScheduler):
if instance_count >= FLAGS.max_networks:
raise driver.NoValidHost(_("All hosts have too many networks"))
if self.service_is_up(service):
return service['host']
driver.cast_to_network_host(context, service['host'],
'set_network_host', **_kwargs)
return None
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))

@@ -195,8 +195,6 @@ class VsaScheduler(simple.SimpleScheduler):
'display_description': vol['description'],
'volume_type_id': vol['volume_type_id'],
'metadata': dict(to_vsa_id=vsa_id),
'host': vol['host'],
'scheduled_at': now
}

size = vol['size']
@@ -205,12 +203,10 @@ class VsaScheduler(simple.SimpleScheduler):
LOG.debug(_("Provision volume %(name)s of size %(size)s GB on "\
"host %(host)s"), locals())

volume_ref = db.volume_create(context, options)
rpc.cast(context,
db.queue_get_for(context, "volume", vol['host']),
{"method": "create_volume",
"args": {"volume_id": volume_ref['id'],
"snapshot_id": None}})
volume_ref = db.volume_create(context.elevated(), options)
driver.cast_to_volume_host(context, vol['host'],
'create_volume', volume_id=volume_ref['id'],
snapshot_id=None)

def _check_host_enforcement(self, context, availability_zone):
if (availability_zone
@@ -274,7 +270,6 @@ class VsaScheduler(simple.SimpleScheduler):
def schedule_create_volumes(self, context, request_spec,
availability_zone=None, *_args, **_kwargs):
"""Picks hosts for hosting multiple volumes."""

num_volumes = request_spec.get('num_volumes')
LOG.debug(_("Attempting to spawn %(num_volumes)d volume(s)") %
locals())
@@ -291,7 +286,8 @@ class VsaScheduler(simple.SimpleScheduler):

for vol in volume_params:
self._provision_volume(context, vol, vsa_id, availability_zone)
except:
except Exception:
LOG.exception(_("Error creating volumes"))
if vsa_id:
db.vsa_update(context, vsa_id, dict(status=VsaState.FAILED))

@@ -310,10 +306,9 @@ class VsaScheduler(simple.SimpleScheduler):
host = self._check_host_enforcement(context,
volume_ref['availability_zone'])
if host:
now = utils.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
return host
driver.cast_to_volume_host(context, host, 'create_volume',
volume_id=volume_id, **_kwargs)
return None

volume_type_id = volume_ref['volume_type_id']
if volume_type_id:
@@ -344,18 +339,16 @@ class VsaScheduler(simple.SimpleScheduler):

try:
(host, qos_cap) = self._select_hosts(request_spec, all_hosts=hosts)
except:
except Exception:
LOG.exception(_("Error creating volume"))
if volume_ref['to_vsa_id']:
db.vsa_update(context, volume_ref['to_vsa_id'],
dict(status=VsaState.FAILED))
raise

if host:
now = utils.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
self._consume_resource(qos_cap, volume_ref['size'], -1)
return host
driver.cast_to_volume_host(context, host, 'create_volume',
volume_id=volume_id, **_kwargs)

def _consume_full_drive(self, qos_values, direction):
qos_values['FullDrive']['NumFreeDrives'] += direction

@@ -35,7 +35,7 @@ class ZoneScheduler(driver.Scheduler):
for topic and availability zone (if defined).
"""

if zone is None:
if not zone:
return self.hosts_up(context, topic)

services = db.service_get_all_by_topic(context, topic)
@@ -44,16 +44,34 @@ class ZoneScheduler(driver.Scheduler):
if self.service_is_up(service)
and service.availability_zone == zone]

def schedule(self, context, topic, *_args, **_kwargs):
def _schedule(self, context, topic, request_spec, **kwargs):
"""Picks a host that is up at random in selected
availability zone (if defined).
"""

zone = _kwargs.get('availability_zone')
hosts = self.hosts_up_with_zone(context, topic, zone)
zone = kwargs.get('availability_zone')
if not zone and request_spec:
zone = request_spec['instance_properties'].get(
'availability_zone')
hosts = self.hosts_up_with_zone(context.elevated(), topic, zone)
if not hosts:
raise driver.NoValidHost(_("Scheduler was unable to locate a host"
" for this request. Is the appropriate"
" service running?"))

return hosts[int(random.random() * len(hosts))]

def schedule(self, context, topic, method, *_args, **kwargs):
host = self._schedule(context, topic, None, **kwargs)
driver.cast_to_host(context, topic, host, method, **kwargs)

def schedule_run_instance(self, context, request_spec, *_args, **kwargs):
"""Builds and starts instances on selected hosts"""
num_instances = request_spec.get('num_instances', 1)
instances = []
for num in xrange(num_instances):
host = self._schedule(context, 'compute', request_spec, **kwargs)
instance = self.create_instance_db_entry(context, request_spec)
driver.cast_to_compute_host(context, host,
'run_instance', instance_id=instance['id'], **kwargs)
instances.append(driver.encode_instance(instance))
return instances

@@ -20,6 +20,7 @@ import json

import nova.db

from nova import context
from nova import exception
from nova import rpc
from nova import test
@@ -102,7 +103,7 @@ def fake_empty_call_zone_method(context, method, specs, zones):
was_called = False


def fake_provision_resource(context, item, instance_id, request_spec, kwargs):
def fake_provision_resource(context, item, request_spec, kwargs):
global was_called
was_called = True

@@ -118,8 +119,7 @@ def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
was_called = True


def fake_provision_resource_from_blob(context, item, instance_id,
request_spec, kwargs):
def fake_provision_resource_from_blob(context, item, request_spec, kwargs):
global was_called
was_called = True

@@ -185,7 +185,7 @@ class AbstractSchedulerTestCase(test.TestCase):
zm = FakeZoneManager()
sched.set_zone_manager(zm)

fake_context = {}
fake_context = context.RequestContext('user', 'project')
build_plan = sched.select(fake_context,
{'instance_type': {'memory_mb': 512},
'num_instances': 4})
@@ -229,9 +229,10 @@ class AbstractSchedulerTestCase(test.TestCase):
zm = FakeEmptyZoneManager()
sched.set_zone_manager(zm)

fake_context = {}
fake_context = context.RequestContext('user', 'project')
request_spec = {}
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, 1,
fake_context, request_spec,
dict(host_filter=None, instance_type={}))

def test_schedule_do_not_schedule_with_hint(self):
@@ -250,8 +251,8 @@ class AbstractSchedulerTestCase(test.TestCase):
'blob': "Non-None blob data",
}

result = sched.schedule_run_instance(None, 1, request_spec)
self.assertEquals(None, result)
instances = sched.schedule_run_instance(None, request_spec)
self.assertTrue(instances)
self.assertTrue(was_called)

def test_provision_resource_local(self):
@@ -263,7 +264,7 @@ class AbstractSchedulerTestCase(test.TestCase):
fake_provision_resource_locally)

request_spec = {'hostname': "foo"}
sched._provision_resource(None, request_spec, 1, request_spec, {})
sched._provision_resource(None, request_spec, request_spec, {})
self.assertTrue(was_called)

def test_provision_resource_remote(self):
@@ -275,7 +276,7 @@ class AbstractSchedulerTestCase(test.TestCase):
fake_provision_resource_from_blob)

request_spec = {}
sched._provision_resource(None, request_spec, 1, request_spec, {})
sched._provision_resource(None, request_spec, request_spec, {})
self.assertTrue(was_called)

def test_provision_resource_from_blob_empty(self):
@@ -285,7 +286,7 @@ class AbstractSchedulerTestCase(test.TestCase):
request_spec = {}
self.assertRaises(abstract_scheduler.InvalidBlob,
sched._provision_resource_from_blob,
None, {}, 1, {}, {})
None, {}, {}, {})

def test_provision_resource_from_blob_with_local_blob(self):
"""
@@ -303,20 +304,21 @@ class AbstractSchedulerTestCase(test.TestCase):
# return fake instances
return {'id': 1, 'uuid': 'f874093c-7b17-49c0-89c3-22a5348497f9'}

def fake_rpc_cast(*args, **kwargs):
def fake_cast_to_compute_host(*args, **kwargs):
pass

self.stubs.Set(sched, '_decrypt_blob',
fake_decrypt_blob_returns_local_info)
self.stubs.Set(driver, 'cast_to_compute_host',
fake_cast_to_compute_host)
self.stubs.Set(compute_api.API,
'create_db_entry_for_new_instance',
fake_create_db_entry_for_new_instance)
self.stubs.Set(rpc, 'cast', fake_rpc_cast)

build_plan_item = {'blob': "Non-None blob data"}
request_spec = {'image': {}, 'instance_properties': {}}

sched._provision_resource_from_blob(None, build_plan_item, 1,
sched._provision_resource_from_blob(None, build_plan_item,
request_spec, {})
self.assertTrue(was_called)

@@ -335,7 +337,7 @@ class AbstractSchedulerTestCase(test.TestCase):

request_spec = {'blob': "Non-None blob data"}

sched._provision_resource_from_blob(None, request_spec, 1,
sched._provision_resource_from_blob(None, request_spec,
request_spec, {})
self.assertTrue(was_called)

@@ -352,7 +354,7 @@ class AbstractSchedulerTestCase(test.TestCase):

request_spec = {'child_blob': True, 'child_zone': True}

sched._provision_resource_from_blob(None, request_spec, 1,
sched._provision_resource_from_blob(None, request_spec,
request_spec, {})
self.assertTrue(was_called)

@@ -386,7 +388,7 @@ class AbstractSchedulerTestCase(test.TestCase):
zm.service_states = {}
sched.set_zone_manager(zm)

fake_context = {}
fake_context = context.RequestContext('user', 'project')
build_plan = sched.select(fake_context,
{'instance_type': {'memory_mb': 512},
'num_instances': 4})
@@ -394,6 +396,45 @@ class AbstractSchedulerTestCase(test.TestCase):
# 0 from local zones, 12 from remotes
self.assertEqual(12, len(build_plan))

def test_run_instance_non_admin(self):
"""Test creating an instance locally using run_instance, passing
a non-admin context. DB actions should work."""
sched = FakeAbstractScheduler()

def fake_cast_to_compute_host(*args, **kwargs):
pass

def fake_zone_get_all_zero(context):
# make sure this is called with admin context, even though
# we're using user context below
self.assertTrue(context.is_admin)
return []

self.stubs.Set(driver, 'cast_to_compute_host',
fake_cast_to_compute_host)
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all_zero)

zm = FakeZoneManager()
sched.set_zone_manager(zm)

fake_context = context.RequestContext('user', 'project')

request_spec = {
'image': {'properties': {}},
'security_group': [],
'instance_properties': {
'project_id': fake_context.project_id,
'user_id': fake_context.user_id},
'instance_type': {'memory_mb': 256},
'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter'
}

instances = sched.schedule_run_instance(fake_context, request_spec)
self.assertEqual(len(instances), 1)
self.assertFalse(instances[0].get('_is_precooked', False))
nova.db.instance_destroy(fake_context, instances[0]['id'])


class BaseSchedulerTestCase(test.TestCase):
"""Test case for Base Scheduler."""

@@ -134,7 +134,7 @@ class LeastCostSchedulerTestCase(test.TestCase):

expected = []
for idx, (hostname, services) in enumerate(hosts):
caps = copy.deepcopy(services["compute"])
caps = copy.deepcopy(services)
# Costs are normalized so over 10 hosts, each host with increasing
# free ram will cost 1/N more. Since the lowest cost host has some
# free ram, we add in the 1/N for the base_cost

@@ -22,6 +22,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
from nova.volume import volume_types
@@ -37,6 +38,10 @@ scheduled_volume = {}
global_volume = {}


def fake_rpc_cast(*args, **kwargs):
pass


class FakeVsaLeastUsedScheduler(
vsa_sched.VsaSchedulerLeastUsedHost):
# No need to stub anything at the moment
@@ -170,12 +175,10 @@ class VsaSchedulerTestCase(test.TestCase):
LOG.debug(_("Test: provision vol %(name)s on host %(host)s"),
locals())
LOG.debug(_("\t vol=%(vol)s"), locals())
pass

def _fake_vsa_update(self, context, vsa_id, values):
LOG.debug(_("Test: VSA update request: vsa_id=%(vsa_id)s "\
"values=%(values)s"), locals())
pass

def _fake_volume_create(self, context, options):
LOG.debug(_("Test: Volume create: %s"), options)
@@ -196,7 +199,6 @@ class VsaSchedulerTestCase(test.TestCase):
"values=%(values)s"), locals())
global scheduled_volume
scheduled_volume = {'id': volume_id, 'host': values['host']}
pass

def _fake_service_get_by_args(self, context, host, binary):
return "service"
@@ -209,7 +211,6 @@ class VsaSchedulerTestCase(test.TestCase):

def setUp(self, sched_class=None):
super(VsaSchedulerTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.context = context.get_admin_context()

if sched_class is None:
@@ -220,6 +221,7 @@ class VsaSchedulerTestCase(test.TestCase):
self.host_num = 10
self.drive_type_num = 5

self.stubs.Set(rpc, 'cast', fake_rpc_cast)
self.stubs.Set(self.sched,
'_get_service_states', self._fake_get_service_states)
self.stubs.Set(self.sched,
@@ -234,8 +236,6 @@ class VsaSchedulerTestCase(test.TestCase):
def tearDown(self):
for name in self.created_types_lst:
volume_types.purge(self.context, name)

self.stubs.UnsetAll()
super(VsaSchedulerTestCase, self).tearDown()

def test_vsa_sched_create_volumes_simple(self):
@@ -333,6 +333,8 @@ class VsaSchedulerTestCase(test.TestCase):
self.stubs.Set(self.sched,
'_get_service_states', self._fake_get_service_states)
self.stubs.Set(nova.db, 'volume_create', self._fake_volume_create)
self.stubs.Set(nova.db, 'volume_update', self._fake_volume_update)
self.stubs.Set(rpc, 'cast', fake_rpc_cast)

self.sched.schedule_create_volumes(self.context,
request_spec,
@@ -467,10 +469,9 @@ class VsaSchedulerTestCase(test.TestCase):
self.stubs.Set(self.sched,
'service_is_up', self._fake_service_is_up_True)

host = self.sched.schedule_create_volume(self.context,
123, availability_zone=None)
self.sched.schedule_create_volume(self.context,
123, availability_zone=None)

self.assertEqual(host, 'host_3')
self.assertEqual(scheduled_volume['id'], 123)
self.assertEqual(scheduled_volume['host'], 'host_3')

@@ -514,10 +515,9 @@ class VsaSchedulerTestCase(test.TestCase):
global_volume['volume_type_id'] = volume_type['id']
global_volume['size'] = 0

host = self.sched.schedule_create_volume(self.context,
123, availability_zone=None)
self.sched.schedule_create_volume(self.context,
123, availability_zone=None)

self.assertEqual(host, 'host_2')
self.assertEqual(scheduled_volume['id'], 123)
self.assertEqual(scheduled_volume['host'], 'host_2')

@@ -529,7 +529,6 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
FakeVsaMostAvailCapacityScheduler())

def tearDown(self):
self.stubs.UnsetAll()
super(VsaSchedulerTestCaseMostAvail, self).tearDown()

def test_vsa_sched_create_single_volume(self):
@@ -558,10 +557,9 @@ class VsaSchedulerTestCaseMostAvail(VsaSchedulerTestCase):
global_volume['volume_type_id'] = volume_type['id']
global_volume['size'] = 0

host = self.sched.schedule_create_volume(self.context,
123, availability_zone=None)
self.sched.schedule_create_volume(self.context,
123, availability_zone=None)

self.assertEqual(host, 'host_9')
self.assertEqual(scheduled_volume['id'], 123)
self.assertEqual(scheduled_volume['host'], 'host_9')


@@ -28,6 +28,7 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova.scheduler import driver as scheduler_driver
from nova import rpc
from nova import test
from nova import utils
@@ -56,6 +57,38 @@ class FakeTime(object):
self.counter += t


orig_rpc_call = rpc.call
orig_rpc_cast = rpc.cast


def rpc_call_wrapper(context, topic, msg, do_cast=True):
"""Stub out the scheduler creating the instance entry"""
if topic == FLAGS.scheduler_topic and \
msg['method'] == 'run_instance':
request_spec = msg['args']['request_spec']
scheduler = scheduler_driver.Scheduler
num_instances = request_spec.get('num_instances', 1)
instances = []
for x in xrange(num_instances):
instance = scheduler().create_instance_db_entry(
context, request_spec)
encoded = scheduler_driver.encode_instance(instance)
instances.append(encoded)
return instances
else:
if do_cast:
orig_rpc_cast(context, topic, msg)
else:
return orig_rpc_call(context, topic, msg)


def rpc_cast_wrapper(context, topic, msg):
"""Stub out the scheduler creating the instance entry in
the reservation_id case.
"""
rpc_call_wrapper(context, topic, msg, do_cast=True)
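
These wrappers let the compute tests run without a live scheduler: rpc.call and rpc.cast are stubbed so that a run_instance message addressed to the scheduler topic is answered by creating the instance entries inline and returning them encoded. A stand-alone sketch of the same stubbing idea, under invented names (fake_rpc_call and handle_run_instance are illustrations, not the test helpers themselves):

# Sketch of the stub-the-scheduler idea; all names here are invented.
def handle_run_instance(request_spec):
    # Pretend to be the scheduler: create one fake instance per requested count.
    num = request_spec.get('num_instances', 1)
    return [{'id': i + 1} for i in range(num)]


def fake_rpc_call(context, topic, msg):
    # Intercept only the scheduler's run_instance; anything else is ignored here.
    if topic == 'scheduler' and msg['method'] == 'run_instance':
        return handle_run_instance(msg['args']['request_spec'])
    return None


instances = fake_rpc_call(None, 'scheduler',
                          {'method': 'run_instance',
                           'args': {'request_spec': {'num_instances': 2}}})
assert len(instances) == 2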

def nop_report_driver_status(self):
pass

@@ -80,6 +113,8 @@ class ComputeTestCase(test.TestCase):
'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)

def _create_instance(self, params=None):
"""Create a test instance"""
@@ -142,7 +177,7 @@ class ComputeTestCase(test.TestCase):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
ref = self.compute_api.create(self.context,
(ref, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None, **instance)
try:
self.assertNotEqual(ref[0]['display_name'], None)
@@ -152,7 +187,7 @@ class ComputeTestCase(test.TestCase):
def test_create_instance_associates_security_groups(self):
"""Make sure create associates security groups"""
group = self._create_group()
ref = self.compute_api.create(
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
@@ -212,7 +247,7 @@ class ComputeTestCase(test.TestCase):
('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello'),
('hello_server', 'hello-server')]
for display_name, hostname in cases:
ref = self.compute_api.create(self.context,
(ref, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
display_name=display_name)
try:
@@ -224,7 +259,7 @@ class ComputeTestCase(test.TestCase):
"""Make sure destroying disassociates security groups"""
group = self._create_group()

ref = self.compute_api.create(
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
@@ -240,7 +275,7 @@ class ComputeTestCase(test.TestCase):
"""Make sure destroying security groups disassociates instances"""
group = self._create_group()

ref = self.compute_api.create(
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
image_href=None,
@@ -1398,6 +1433,84 @@ class ComputeTestCase(test.TestCase):
'swap'),
swap_size)

def test_reservation_id_one_instance(self):
"""Verify building an instance has a reservation_id that
matches return value from create"""
(refs, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None)
try:
self.assertEqual(len(refs), 1)
self.assertEqual(refs[0]['reservation_id'], resv_id)
finally:
db.instance_destroy(self.context, refs[0]['id'])

def test_reservation_ids_two_instances(self):
"""Verify building 2 instances at once results in a
reservation_id being returned equal to reservation id set
in both instances
"""
(refs, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
min_count=2, max_count=2)
try:
self.assertEqual(len(refs), 2)
self.assertNotEqual(resv_id, None)
finally:
for instance in refs:
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, instance['id'])

def test_reservation_ids_two_instances_no_wait(self):
"""Verify building 2 instances at once without waiting for
instance IDs results in a reservation_id being returned equal
to reservation id set in both instances
"""
(refs, resv_id) = self.compute_api.create(self.context,
instance_types.get_default_instance_type(), None,
min_count=2, max_count=2, wait_for_instances=False)
try:
self.assertEqual(refs, None)
self.assertNotEqual(resv_id, None)
finally:
instances = self.compute_api.get_all(self.context,
search_opts={'reservation_id': resv_id})
self.assertEqual(len(instances), 2)
for instance in instances:
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, instance['id'])

def test_create_with_specified_reservation_id(self):
"""Verify building instances with a specified
reservation_id results in the correct reservation_id
being set
"""

# We need admin context to be able to specify our own
# reservation_ids.
context = self.context.elevated()
# 1 instance
(refs, resv_id) = self.compute_api.create(context,
instance_types.get_default_instance_type(), None,
min_count=1, max_count=1, reservation_id='meow')
try:
self.assertEqual(len(refs), 1)
self.assertEqual(resv_id, 'meow')
finally:
self.assertEqual(refs[0]['reservation_id'], resv_id)
db.instance_destroy(self.context, refs[0]['id'])

# 2 instances
(refs, resv_id) = self.compute_api.create(context,
instance_types.get_default_instance_type(), None,
min_count=2, max_count=2, reservation_id='woof')
try:
self.assertEqual(len(refs), 2)
self.assertEqual(resv_id, 'woof')
finally:
for instance in refs:
self.assertEqual(instance['reservation_id'], resv_id)
db.instance_destroy(self.context, instance['id'])
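
The reservation_id tests above exercise the new compute_api.create() return shape: a (refs, reservation_id) tuple rather than a bare list, with refs set to None when wait_for_instances=False. A toy illustration of consuming that shape (fake_create is an invented stand-in, not the real nova.compute.API.create):

import uuid


def fake_create(context, instance_type, image_href, min_count=1, max_count=1,
                reservation_id=None, wait_for_instances=True):
    # Invented stand-in for the refactored compute_api.create(); it only
    # mimics the new return shape: (list of instance refs, reservation_id).
    resv_id = reservation_id or uuid.uuid4().hex[:10]
    refs = [{'id': i + 1, 'reservation_id': resv_id} for i in range(max_count)]
    if not wait_for_instances:
        refs = None          # caller is expected to poll by reservation_id
    return (refs, resv_id)


(refs, resv_id) = fake_create(None, 'm1.tiny', None, min_count=2, max_count=2)
assert len(refs) == 2
assert all(r['reservation_id'] == resv_id for r in refs)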

class ComputeTestMinRamMinDisk(test.TestCase):
def setUp(self):
@@ -1405,6 +1518,8 @@ class ComputeTestMinRamMinDisk(test.TestCase):
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
self.context = context.RequestContext('fake', 'fake')
self.stubs.Set(rpc, 'call', rpc_call_wrapper)
self.stubs.Set(rpc, 'cast', rpc_cast_wrapper)
self.fake_image = {
'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

@@ -1425,10 +1540,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

# Now increase the inst_type memory and make sure all is fine.
inst_type['memory_mb'] = 2
ref = self.compute_api.create(self.context, inst_type, None)
self.assertTrue(ref)

db.instance_destroy(self.context, ref[0]['id'])
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['id'])

def test_create_with_too_little_disk(self):
"""Test an instance type with too little disk space"""
@@ -1447,10 +1561,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

# Now increase the inst_type disk space and make sure all is fine.
inst_type['local_gb'] = 2
ref = self.compute_api.create(self.context, inst_type, None)
self.assertTrue(ref)

db.instance_destroy(self.context, ref[0]['id'])
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['id'])

def test_create_just_enough_ram_and_disk(self):
"""Test an instance type with just enough ram and disk space"""
@@ -1466,10 +1579,9 @@ class ComputeTestMinRamMinDisk(test.TestCase):

return img
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

ref = self.compute_api.create(self.context, inst_type, None)
self.assertTrue(ref)

db.instance_destroy(self.context, ref[0]['id'])
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['id'])

def test_create_with_no_ram_and_disk_reqs(self):
"""Test an instance type with no min_ram or min_disk"""
@@ -1482,7 +1594,6 @@ class ComputeTestMinRamMinDisk(test.TestCase):
return copy(self.fake_image)
self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)

ref = self.compute_api.create(self.context, inst_type, None)
self.assertTrue(ref)

db.instance_destroy(self.context, ref[0]['id'])
(refs, resv_id) = self.compute_api.create(self.context,
inst_type, None)
db.instance_destroy(self.context, refs[0]['id'])