merged scheduler
@@ -304,7 +304,10 @@ class APIRequestHandler(tornado.web.RequestHandler):
        try:
            failure.raiseException()
        except exception.ApiError as ex:
            self._error(type(ex).__name__ + "." + ex.code, ex.message)
            if ex.code:
                self._error(ex.code, ex.message)
            else:
                self._error(type(ex).__name__, ex.message)
        # TODO(vish): do something more useful with unknown exceptions
        except Exception as ex:
            self._error(type(ex).__name__, str(ex))
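The new branch reports an ApiError under its explicit code when one is set, and falls back to the class name otherwise. A minimal sketch of that dispatch, using a stand-in ApiError with the same (message, code) shape (not Nova's actual class):

```python
# Stand-in for exception.ApiError: a message plus an optional error code.
class ApiError(Exception):
    def __init__(self, message, code=None):
        super(ApiError, self).__init__(message)
        self.message = message
        self.code = code


def error_fields(ex):
    """Return the (name, message) pair the handler would report."""
    if isinstance(ex, ApiError) and ex.code:
        return (ex.code, ex.message)            # explicit code wins
    if isinstance(ex, ApiError):
        return (type(ex).__name__, ex.message)  # fall back to class name
    return (type(ex).__name__, str(ex))         # unknown exceptions


print(error_fields(ApiError("too many", "QuotaExceeded")))  # ('QuotaExceeded', 'too many')
print(error_fields(ApiError("oops")))                       # ('ApiError', 'oops')
print(error_fields(ValueError("bad input")))                # ('ValueError', 'bad input')
```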
@@ -33,6 +33,7 @@ from twisted.internet import defer
from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import rpc
from nova import utils
from nova.auth import rbac
@@ -45,6 +46,11 @@ FLAGS = flags.FLAGS
flags.DECLARE('storage_availability_zone', 'nova.volume.manager')


class QuotaError(exception.ApiError):
    """Quota Exceeded"""
    pass


def _gen_key(user_id, key_name):
    """ Tuck this into AuthManager """
    try:
@@ -278,6 +284,14 @@ class CloudController(object):

    @rbac.allow('projectmanager', 'sysadmin')
    def create_volume(self, context, size, **kwargs):
        # check quota
        size = int(size)
        if quota.allowed_volumes(context, 1, size) < 1:
            logging.warn("Quota exceeded for %s, tried to create %sG volume",
                         context.project.id, size)
            raise QuotaError("Volume quota exceeded. You cannot "
                             "create a volume of size %s" % size)
        vol = {}
        vol['size'] = size
        vol['user_id'] = context.user.id
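The gate above asks quota.allowed_volumes whether one more volume of this size fits. A sketch of the assumed semantics, matching the limits exercised by the new quota tests below (quota_volumes=2, quota_gigabytes=20); the helper and its signature are illustrative, not Nova's:

```python
# Stand-in for quota.allowed_volumes: how many of the requested volumes
# of `size` GB the project may still create under both limits.
def allowed_volumes(quota_volumes, quota_gigabytes, used_volumes,
                    used_gigabytes, requested, size):
    free_volumes = quota_volumes - used_volumes
    free_gigabytes = quota_gigabytes - used_gigabytes
    allowed = min(free_volumes, free_gigabytes // size)
    return min(requested, allowed)


# With 2 volumes / 20 GB allowed and one 10 GB volume in use,
# a second 10 GB volume fits, but once full the request is denied:
print(allowed_volumes(2, 20, 1, 10, 1, 10))  # 1 -> create proceeds
print(allowed_volumes(2, 20, 2, 20, 1, 10))  # 0 -> QuotaError
```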
@@ -287,9 +301,11 @@ class CloudController(object):
        vol['attach_status'] = "detached"
        volume_ref = db.volume_create(context, vol)

        rpc.cast(FLAGS.volume_topic, {"method": "create_volume",
                                      "args": {"context": None,
                                               "volume_id": volume_ref['id']}})
        rpc.cast(FLAGS.scheduler_topic,
                 {"method": "create_volume",
                  "args": {"context": None,
                           "topic": FLAGS.volume_topic,
                           "volume_id": volume_ref['id']}})

        return {'volumeSet': [self._format_volume(context, volume_ref)]}
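The cast now goes to the scheduler topic instead of straight to a volume host; the scheduler is expected to pick a host and re-cast the same message. A hypothetical sketch of that forwarding step (the "topic.host" addressing and the helper names here are assumptions, not code from this commit):

```python
# rpc_cast stands in for nova.rpc.cast; pick_host stands in for the
# scheduler driver's host selection (e.g. fewest allocated gigabytes).
def schedule_and_forward(rpc_cast, pick_host, topic, volume_id):
    host = pick_host(topic)
    rpc_cast("%s.%s" % (topic, host),
             {"method": "create_volume",
              "args": {"context": None, "volume_id": volume_id}})


messages = []
schedule_and_forward(lambda t, msg: messages.append((t, msg)),
                     lambda _topic: "host2", "volume", 42)
print(messages)  # [('volume.host2', {'method': 'create_volume', ...})]
```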
@@ -442,6 +458,12 @@ class CloudController(object):
    @rbac.allow('netadmin')
    @defer.inlineCallbacks
    def allocate_address(self, context, **kwargs):
        # check quota
        if quota.allowed_floating_ips(context, 1) < 1:
            logging.warn("Quota exceeded for %s, tried to allocate address",
                         context.project.id)
            raise QuotaError("Address quota exceeded. You cannot "
                             "allocate any more addresses")
        network_topic = yield self._get_network_topic(context)
        public_ip = yield rpc.call(network_topic,
                                   {"method": "allocate_floating_ip",
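Same pattern for floating IPs; a sketch with a stand-in for quota.allowed_floating_ips (the signature is illustrative, not Nova's):

```python
# Stand-in: how many more floating IPs the project may allocate.
def allowed_floating_ips(quota_floating_ips, used_ips, requested):
    return min(requested, quota_floating_ips - used_ips)


# The quota tests below set quota_floating_ips=1, so the second
# allocation is denied:
print(allowed_floating_ips(1, 0, 1))  # 1 -> allocation proceeds
print(allowed_floating_ips(1, 1, 1))  # 0 -> QuotaError
```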
@@ -501,6 +523,22 @@ class CloudController(object):
    @rbac.allow('projectmanager', 'sysadmin')
    @defer.inlineCallbacks
    def run_instances(self, context, **kwargs):
        instance_type = kwargs.get('instance_type', 'm1.small')
        if instance_type not in INSTANCE_TYPES:
            raise exception.ApiError("Unknown instance type: %s" %
                                     instance_type)
        # check quota
        max_instances = int(kwargs.get('max_count', 1))
        min_instances = int(kwargs.get('min_count', max_instances))
        num_instances = quota.allowed_instances(context,
                                                max_instances,
                                                instance_type)
        if num_instances < min_instances:
            logging.warn("Quota exceeded for %s, tried to run %s instances",
                         context.project.id, min_instances)
            raise QuotaError("Instance quota exceeded. You can only "
                             "run %s more instances of this type." %
                             num_instances, "InstanceLimitExceeded")
        # make sure user can access the image
        # vpn image is private so it doesn't show up on lists
        vpn = kwargs['image_id'] == FLAGS.vpn_image_id
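quota.allowed_instances is handed max_count, so it is assumed to return the request already capped at the project's remaining quota; if even min_count cannot be satisfied, the call fails. A sketch folding both steps into one illustrative helper (ValueError stands in for QuotaError):

```python
# `allowed` plays the role of the project's remaining instance quota;
# the min() mirrors the capping assumed to happen inside
# quota.allowed_instances.
def instances_to_run(allowed, min_count, max_count):
    num = min(max_count, allowed)
    if num < min_count:
        raise ValueError("Instance quota exceeded. You can only "
                         "run %s more instances of this type." % num)
    return num


print(instances_to_run(allowed=4, min_count=1, max_count=10))  # 4
print(instances_to_run(allowed=4, min_count=2, max_count=3))   # 3
# instances_to_run(allowed=0, min_count=1, max_count=1) -> raises
```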
@@ -522,7 +560,7 @@ class CloudController(object):
        images.get(context, kernel_id)
        images.get(context, ramdisk_id)

        logging.debug("Going to run instances...")
        logging.debug("Going to run %s instances...", num_instances)
        launch_time = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())
        key_data = None
        if kwargs.has_key('key_name'):
@@ -547,10 +585,15 @@ class CloudController(object):
        base_options['user_id'] = context.user.id
        base_options['project_id'] = context.project.id
        base_options['user_data'] = kwargs.get('user_data', '')
        base_options['instance_type'] = kwargs.get('instance_type', 'm1.small')
        base_options['security_group'] = security_group
        base_options['instance_type'] = instance_type

        for num in range(int(kwargs['max_count'])):
        type_data = INSTANCE_TYPES[instance_type]
        base_options['memory_mb'] = type_data['memory_mb']
        base_options['vcpus'] = type_data['vcpus']
        base_options['local_gb'] = type_data['local_gb']

        for num in range(num_instances):
            instance_ref = db.instance_create(context, base_options)
            inst_id = instance_ref['id']
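The per-type resource sizes are now looked up once, outside the loop, and copied into every instance record. A sketch of that lookup; the table values are assumed for illustration only:

```python
# Illustrative stand-in for the INSTANCE_TYPES table; the real values
# are not part of this hunk.
INSTANCE_TYPES = {'m1.small': {'memory_mb': 2048, 'vcpus': 1,
                               'local_gb': 20}}

base_options = {'instance_type': 'm1.small'}
type_data = INSTANCE_TYPES[base_options['instance_type']]
base_options.update(memory_mb=type_data['memory_mb'],
                    vcpus=type_data['vcpus'],
                    local_gb=type_data['local_gb'])
print(base_options)  # sized options shared by every created instance
```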
@@ -29,10 +29,10 @@ from nova.scheduler import driver
from nova.scheduler import chance

FLAGS = flags.FLAGS
flags.DEFINE_integer("max_instances", 16,
                     "maximum number of instances to allow per host")
flags.DEFINE_integer("max_volumes", 100,
                     "maximum number of volumes to allow per host")
flags.DEFINE_integer("max_cores", 16,
                     "maximum number of instance cores to allow per host")
flags.DEFINE_integer("max_gigabytes", 10000,
                     "maximum number of volume gigabytes to allow per host")
flags.DEFINE_integer("max_networks", 1000,
                     "maximum number of networks to allow per host")
@@ -41,12 +41,12 @@ class SimpleScheduler(chance.ChanceScheduler):

    def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest running instances."""

        instance_ref = db.instance_get(context, instance_id)
        results = db.service_get_all_compute_sorted(context)
        for result in results:
            (service, instance_count) = result
            if instance_count >= FLAGS.max_instances:
                raise driver.NoValidHost("All hosts have too many instances")
            (service, instance_cores) = result
            if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
                raise driver.NoValidHost("All hosts have too many cores")
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
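Because the services arrive sorted by allocated cores (least-loaded first), the very first host exceeding max_cores means every host would; hence the early NoValidHost. A sketch of the selection loop (RuntimeError stands in for driver.NoValidHost, and the service dict shape is assumed):

```python
# sorted_services mimics db.service_get_all_compute_sorted: (service,
# used_cores) pairs in ascending order of load; is_up stands in for
# self.service_is_up.
def pick_compute_host(sorted_services, vcpus_needed, max_cores, is_up):
    for service, used_cores in sorted_services:
        if used_cores + vcpus_needed > max_cores:
            raise RuntimeError("All hosts have too many cores")
        if is_up(service):
            return service["host"]
    raise RuntimeError("No valid host found")


services = [({"host": "host2"}, 1), ({"host": "host1"}, 2)]
print(pick_compute_host(services, 1, 4, lambda s: True))  # host2
```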
@@ -60,12 +60,12 @@ class SimpleScheduler(chance.ChanceScheduler):

    def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
        """Picks a host that is up and has the fewest volumes."""

        volume_ref = db.volume_get(context, volume_id)
        results = db.service_get_all_volume_sorted(context)
        for result in results:
            (service, instance_count) = result
            if instance_count >= FLAGS.max_volumes:
                raise driver.NoValidHost("All hosts have too many volumes")
            (service, volume_gigabytes) = result
            if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
                raise driver.NoValidHost("All hosts have too many gigabytes")
            if self.service_is_up(service):
                # NOTE(vish): this probably belongs in the manager, if we
                #             can generalize this somehow
@@ -50,6 +50,7 @@ class ComputeTestCase(test.TrialTestCase):
    def tearDown(self):  # pylint: disable-msg=C0103
        self.manager.delete_user(self.user)
        self.manager.delete_project(self.project)
        super(ComputeTestCase, self).tearDown()

    def _create_instance(self):
        """Create a test instance"""
nova/tests/quota_unittest.py (new file, 155 lines)
@@ -0,0 +1,155 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from nova import db
from nova import exception
from nova import flags
from nova import quota
from nova import test
from nova import utils
from nova.auth import manager
from nova.endpoint import cloud
from nova.endpoint import api


FLAGS = flags.FLAGS


class QuotaTestCase(test.TrialTestCase):
    def setUp(self):  # pylint: disable-msg=C0103
        logging.getLogger().setLevel(logging.DEBUG)
        super(QuotaTestCase, self).setUp()
        self.flags(connection_type='fake',
                   quota_instances=2,
                   quota_cores=4,
                   quota_volumes=2,
                   quota_gigabytes=20,
                   quota_floating_ips=1)

        self.cloud = cloud.CloudController()
        self.manager = manager.AuthManager()
        self.user = self.manager.create_user('admin', 'admin', 'admin', True)
        self.project = self.manager.create_project('admin', 'admin', 'admin')
        self.network = utils.import_object(FLAGS.network_manager)
        self.context = api.APIRequestContext(handler=None,
                                             project=self.project,
                                             user=self.user)

    def tearDown(self):  # pylint: disable-msg=C0103
        manager.AuthManager().delete_project(self.project)
        manager.AuthManager().delete_user(self.user)
        super(QuotaTestCase, self).tearDown()

    def _create_instance(self, cores=2):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.large'
        inst['vcpus'] = cores
        inst['mac_address'] = utils.generate_mac()
        return db.instance_create(self.context, inst)['id']

    def _create_volume(self, size=10):
        """Create a test volume"""
        vol = {}
        vol['user_id'] = self.user.id
        vol['project_id'] = self.project.id
        vol['size'] = size
        return db.volume_create(self.context, vol)['id']

    def test_quota_overrides(self):
        """Make sure overriding a project's quotas works"""
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 2)
        db.quota_create(self.context, {'project_id': self.project.id,
                                       'instances': 10})
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 4)
        db.quota_update(self.context, self.project.id, {'cores': 100})
        num_instances = quota.allowed_instances(self.context, 100, 'm1.small')
        self.assertEqual(num_instances, 10)
        db.quota_destroy(self.context, self.project.id)

    def test_too_many_instances(self):
        instance_ids = []
        for i in range(FLAGS.quota_instances):
            instance_id = self._create_instance()
            instance_ids.append(instance_id)
        self.assertFailure(self.cloud.run_instances(self.context,
                                                    min_count=1,
                                                    max_count=1,
                                                    instance_type='m1.small'),
                           cloud.QuotaError)
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)

    def test_too_many_cores(self):
        instance_ids = []
        instance_id = self._create_instance(cores=4)
        instance_ids.append(instance_id)
        self.assertFailure(self.cloud.run_instances(self.context,
                                                    min_count=1,
                                                    max_count=1,
                                                    instance_type='m1.small'),
                           cloud.QuotaError)
        for instance_id in instance_ids:
            db.instance_destroy(self.context, instance_id)

    def test_too_many_volumes(self):
        volume_ids = []
        for i in range(FLAGS.quota_volumes):
            volume_id = self._create_volume()
            volume_ids.append(volume_id)
        self.assertRaises(cloud.QuotaError,
                          self.cloud.create_volume,
                          self.context,
                          size=10)
        for volume_id in volume_ids:
            db.volume_destroy(self.context, volume_id)

    def test_too_many_gigabytes(self):
        volume_ids = []
        volume_id = self._create_volume(size=20)
        volume_ids.append(volume_id)
        self.assertRaises(cloud.QuotaError,
                          self.cloud.create_volume,
                          self.context,
                          size=10)
        for volume_id in volume_ids:
            db.volume_destroy(self.context, volume_id)

    def test_too_many_addresses(self):
        address = '192.168.0.100'
        try:
            db.floating_ip_get_by_address(None, address)
        except exception.NotFound:
            db.floating_ip_create(None, {'address': address,
                                         'host': FLAGS.host})
        float_addr = self.network.allocate_floating_ip(self.context,
                                                       self.project.id)
        # NOTE(vish): This assert never fails. When cloud attempts to
        #             make an rpc.call, the test just finishes with OK. It
        #             appears to be something in the magic inline callbacks
        #             that is breaking.
        self.assertFailure(self.cloud.allocate_address(self.context),
                           cloud.QuotaError)
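The new tests use Twisted trial's assertFailure for the inlineCallbacks methods (run_instances, allocate_address), which return Deferreds rather than raising up the stack, and plain assertRaises for the synchronous create_volume. A minimal standalone sketch of that distinction (requires Twisted; not part of the commit):

```python
from twisted.internet import defer
from twisted.trial import unittest


class SketchError(Exception):
    pass


@defer.inlineCallbacks
def async_op():
    # The exception travels inside the returned Deferred.
    yield defer.succeed(None)
    raise SketchError("boom")


def sync_op():
    # The exception propagates normally.
    raise SketchError("boom")


class SketchTestCase(unittest.TestCase):
    def test_async(self):
        # assertFailure inspects the Deferred's failure.
        return self.assertFailure(async_op(), SketchError)

    def test_sync(self):
        self.assertRaises(SketchError, sync_op)
```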
@@ -19,8 +19,6 @@
Tests For Scheduler
"""

import mox

from nova import db
from nova import flags
from nova import service
@@ -33,7 +31,7 @@ from nova.scheduler import driver


FLAGS = flags.FLAGS
flags.DECLARE('max_instances', 'nova.scheduler.simple')
flags.DECLARE('max_cores', 'nova.scheduler.simple')


class TestDriver(driver.Scheduler):
    """Scheduler Driver for Tests"""
@@ -75,7 +73,9 @@ class SimpleDriverTestCase(test.TrialTestCase):
    def setUp(self):  # pylint: disable-msg=C0103
        super(SimpleDriverTestCase, self).setUp()
        self.flags(connection_type='fake',
                   max_instances=4,
                   max_cores=4,
                   max_gigabytes=4,
                   volume_driver='nova.volume.driver.FakeAOEDriver',
                   scheduler_driver='nova.scheduler.simple.SimpleScheduler')
        self.scheduler = manager.SchedulerManager()
        self.context = None
@@ -83,65 +83,149 @@ class SimpleDriverTestCase(test.TrialTestCase):
        self.user = self.manager.create_user('fake', 'fake', 'fake')
        self.project = self.manager.create_project('fake', 'fake', 'fake')
        self.context = None
        self.service1 = service.Service('host1',
                                        'nova-compute',
                                        'compute',
                                        FLAGS.compute_manager)
        self.service2 = service.Service('host2',
                                        'nova-compute',
                                        'compute',
                                        FLAGS.compute_manager)

    def tearDown(self):  # pylint: disable-msg=C0103
        self.manager.delete_user(self.user)
        self.manager.delete_project(self.project)
        self.service1.kill()
        self.service2.kill()

    def _create_instance(self):
        """Create a test instance"""
        inst = {}
        inst['image_id'] = 'ami-test'
        inst['reservation_id'] = 'r-fakeres'
        inst['launch_time'] = '10'
        inst['user_id'] = self.user.id
        inst['project_id'] = self.project.id
        inst['instance_type'] = 'm1.tiny'
        inst['mac_address'] = utils.generate_mac()
        inst['ami_launch_index'] = 0
        inst['vcpus'] = 1
        return db.instance_create(self.context, inst)['id']

    def _create_volume(self):
        """Create a test volume"""
        vol = {}
        vol['image_id'] = 'ami-test'
        vol['reservation_id'] = 'r-fakeres'
        vol['size'] = 1
        return db.volume_create(self.context, vol)['id']

    def test_hosts_are_up(self):
        """Ensures driver can find the hosts that are up"""
        # NOTE(vish): constructing service without create method
        #             because we are going to use it without queue
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
        self.assertEqual(len(hosts), 2)
        compute1.kill()
        compute2.kill()

    def test_least_busy_host_gets_instance(self):
        instance_id = self._create_instance()
        self.service1.run_instance(self.context, instance_id)
        """Ensures the host with less cores gets the next one"""
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        instance_id1 = self._create_instance()
        compute1.run_instance(self.context, instance_id1)
        instance_id2 = self._create_instance()
        host = self.scheduler.driver.schedule_run_instance(self.context,
                                                           instance_id)
                                                           instance_id2)
        self.assertEqual(host, 'host2')
        self.service1.terminate_instance(self.context, instance_id)
        compute1.terminate_instance(self.context, instance_id1)
        db.instance_destroy(self.context, instance_id2)
        compute1.kill()
        compute2.kill()

    def test_too_many_instances(self):
    def test_too_many_cores(self):
        """Ensures we don't go over max cores"""
        compute1 = service.Service('host1',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        compute2 = service.Service('host2',
                                   'nova-compute',
                                   'compute',
                                   FLAGS.compute_manager)
        instance_ids1 = []
        instance_ids2 = []
        for index in xrange(FLAGS.max_instances):
        for index in xrange(FLAGS.max_cores):
            instance_id = self._create_instance()
            self.service1.run_instance(self.context, instance_id)
            compute1.run_instance(self.context, instance_id)
            instance_ids1.append(instance_id)
            instance_id = self._create_instance()
            self.service2.run_instance(self.context, instance_id)
            compute2.run_instance(self.context, instance_id)
            instance_ids2.append(instance_id)
        instance_id = self._create_instance()
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_run_instance,
                          self.context,
                          'compute',
                          instance_id)
        for instance_id in instance_ids1:
            self.service1.terminate_instance(self.context, instance_id)
            compute1.terminate_instance(self.context, instance_id)
        for instance_id in instance_ids2:
            self.service2.terminate_instance(self.context, instance_id)
            compute2.terminate_instance(self.context, instance_id)
        compute1.kill()
        compute2.kill()

    def test_least_busy_host_gets_volume(self):
        """Ensures the host with less gigabytes gets the next one"""
        volume1 = service.Service('host1',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume2 = service.Service('host2',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume_id1 = self._create_volume()
        volume1.create_volume(self.context, volume_id1)
        volume_id2 = self._create_volume()
        host = self.scheduler.driver.schedule_create_volume(self.context,
                                                            volume_id2)
        self.assertEqual(host, 'host2')
        volume1.delete_volume(self.context, volume_id1)
        db.volume_destroy(self.context, volume_id2)
        volume1.kill()
        volume2.kill()

    def test_too_many_gigabytes(self):
        """Ensures we don't go over max gigabytes"""
        volume1 = service.Service('host1',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume2 = service.Service('host2',
                                  'nova-volume',
                                  'volume',
                                  FLAGS.volume_manager)
        volume_ids1 = []
        volume_ids2 = []
        for index in xrange(FLAGS.max_gigabytes):
            volume_id = self._create_volume()
            volume1.create_volume(self.context, volume_id)
            volume_ids1.append(volume_id)
            volume_id = self._create_volume()
            volume2.create_volume(self.context, volume_id)
            volume_ids2.append(volume_id)
        volume_id = self._create_volume()
        self.assertRaises(driver.NoValidHost,
                          self.scheduler.driver.schedule_create_volume,
                          self.context,
                          volume_id)
        for volume_id in volume_ids1:
            volume1.delete_volume(self.context, volume_id)
        for volume_id in volume_ids2:
            volume2.delete_volume(self.context, volume_id)
        volume1.kill()
        volume2.kill()
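In test_too_many_gigabytes each created volume is 1 GB (_create_volume sets size=1) and the test flags set max_gigabytes=4, so after the loop both hosts are full and any further volume must be rejected. The arithmetic:

```python
# Capacity math behind test_too_many_gigabytes: one 1 GB volume lands
# on each host per loop round, for max_gigabytes rounds.
max_gigabytes = 4
used = {"host1": 0, "host2": 0}
for _ in range(max_gigabytes):
    for host in used:
        used[host] += 1

print(used)  # {'host1': 4, 'host2': 4}
# One more 1 GB volume exceeds the cap on every host -> NoValidHost:
print(all(used[h] + 1 > max_gigabytes for h in used))  # True
```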
@@ -58,6 +58,7 @@ from nova.tests.flags_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.quota_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *