Generic benchmark cleanup
Provides an implementation of generic benchmark cleanup, which runs with the same concurrency as specified in the benchmark's configuration. The cleanup logic covers the common resources that benchmarks create, such as servers, images, volumes, etc.

Implements: blueprint benchmark-cloud-cleanup

Change-Id: I9e9a66795823c68486aa33d79d312373d825fe43
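For orientation before reading the diff, here is a minimal, self-contained sketch of the dispatch pattern this patch adds in ScenarioRunner._cleanup_scenario: client indexes are split into chunks of at most "concurrent" entries (the benchmark's active_users value) and each chunk is handed to a multiprocessing.Pool worker. The helper names cleanup_concurrently and _cleanup_clients below are illustrative stand-ins, not part of the patch; the real worker is _async_cleanup, which invokes ScenarioRunner._cleanup_with_clients against live OpenStack clients via the new cleanup_utils module.

# Illustrative sketch only -- mirrors the chunk-and-dispatch logic of
# ScenarioRunner._cleanup_scenario(); the names here are hypothetical stand-ins.
import multiprocessing


def _cleanup_clients(indexes):
    # Stand-in for ScenarioRunner._cleanup_with_clients(): in the patch this
    # purges servers, keypairs, security groups, networks, images and cinder
    # resources for each per-user client set.
    for index in indexes:
        print("cleaning up resources of user client set #%d" % index)


def cleanup_concurrently(num_client_sets, concurrent):
    # Split client indexes into chunks of at most `concurrent` entries and
    # hand each chunk to a pool worker, as _cleanup_scenario() does.
    indexes = range(num_client_sets)
    chunks = [indexes[i:i + concurrent]
              for i in range(0, num_client_sets, concurrent)]
    pool = multiprocessing.Pool(concurrent)
    for chunk in chunks:
        pool.apply_async(_cleanup_clients, args=(list(chunk),))
    pool.close()
    pool.join()


if __name__ == "__main__":
    # Ten per-user client sets, cleaned up three at a time.
    cleanup_concurrently(10, concurrent=3)

Running the sketch prints which user's resources each worker would purge; in the real code the worker calls the _delete_* helpers from rally/benchmark/cleanup_utils.py instead of printing.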
rally/benchmark/cleanup_utils.py (new file, 85 lines added)
@@ -0,0 +1,85 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from novaclient import exceptions as nova_exceptions
import utils


def _delete_servers(nova):
    for server in nova.servers.list():
        server.delete()
    utils._wait_for_empty_list(nova.servers, timeout=600, check_interval=3)


def _delete_keypairs(nova):
    for keypair in nova.keypairs.list():
        keypair.delete()
    utils._wait_for_empty_list(nova.keypairs)


def _delete_security_groups(nova):
    for group in nova.security_groups.list():
        try:
            group.delete()
        except nova_exceptions.BadRequest as br:
            #TODO(boden): find a way to determine default security group
            if not br.message.startswith('Unable to delete system group'):
                raise br
    utils._wait_for_list_size(nova.security_groups, sizes=[0, 1])


def _delete_images(glance, project_uuid):
    for image in glance.images.list(owner=project_uuid):
        image.delete()
    utils._wait_for_list_statuses(glance.images, statuses=["DELETED"],
                                  list_query={'owner': project_uuid},
                                  timeout=600, check_interval=3)


def _delete_networks(nova):
    for network in nova.networks.list():
        network.delete()
    utils._wait_for_empty_list(nova.networks)


def _delete_volumes(cinder):
    for vol in cinder.volumes.list():
        vol.delete()
    utils._wait_for_empty_list(cinder.volumes, timeout=120)


def _delete_volume_types(cinder):
    for vol_type in cinder.volume_types.list():
        cinder.volume_types.delete(vol_type.id)
    utils._wait_for_empty_list(cinder.volume_types)


def _delete_volume_transfers(cinder):
    for transfer in cinder.transfers.list():
        transfer.delete()
    utils._wait_for_empty_list(cinder.transfers)


def _delete_volume_snapshots(cinder):
    for snapshot in cinder.volume_snapshots.list():
        snapshot.delete()
    utils._wait_for_empty_list(cinder.volume_snapshots, timeout=240)


def _delete_volume_backups(cinder):
    for backup in cinder.backups.list():
        backup.delete()
    utils._wait_for_empty_list(cinder.backups, timeout=240)

@@ -25,6 +25,7 @@ import uuid
import fuel_health.cleanup as fuel_cleanup

from rally.benchmark import base
from rally.benchmark import cleanup_utils
from rally import exceptions as rally_exceptions
from rally.openstack.common.gettextutils import _  # noqa
from rally.openstack.common import log as logging
@@ -133,10 +134,6 @@ def _run_scenario_loop(args):
    return {"time": timer.duration() - cls.idle_time,
            "idle_time": cls.idle_time, "error": None}

    # NOTE(msdubov): Cleaning up after each scenario loop enables to delete
    # the resources of the user the scenario was run from.
    cls.cleanup()


def _create_openstack_clients(users_endpoints, keys):
    # NOTE(msdubov): Creating here separate openstack clients for each of
@@ -193,6 +190,49 @@ class ScenarioRunner(object):
            temporary_endpoints.append(user_credentials)
        return temporary_endpoints

    @classmethod
    def _delete_nova_resources(cls, nova):
        cleanup_utils._delete_servers(nova)
        cleanup_utils._delete_keypairs(nova)
        cleanup_utils._delete_security_groups(nova)
        cleanup_utils._delete_networks(nova)

    @classmethod
    def _delete_cinder_resources(cls, cinder):
        cleanup_utils._delete_volume_transfers(cinder)
        cleanup_utils._delete_volumes(cinder)
        cleanup_utils._delete_volume_types(cinder)
        cleanup_utils._delete_volume_snapshots(cinder)
        cleanup_utils._delete_volume_backups(cinder)

    @classmethod
    def _delete_glance_resources(cls, glance, project_uuid):
        cleanup_utils._delete_images(glance, project_uuid)

    @classmethod
    def _cleanup_with_clients(cls, indexes):
        for index in indexes:
            clients = __openstack_clients__[index]
            try:
                cls._delete_nova_resources(clients["nova"])
                cls._delete_glance_resources(clients["glance"],
                                             clients["keystone"].project_id)
                cls._delete_cinder_resources(clients["cinder"])
            except Exception as e:
                LOG.exception(_('Encountered error during cleanup: %s') %
                              (e.message))

    def _cleanup_scenario(self, concurrent):
        indexes = range(0, len(__openstack_clients__))
        chunked_indexes = [indexes[i:i + concurrent]
                           for i in range(0, len(indexes), concurrent)]
        pool = multiprocessing.Pool(concurrent)
        for client_indicies in chunked_indexes:
            pool.apply_async(_async_cleanup, args=(ScenarioRunner,
                                                   client_indicies,))
        pool.close()
        pool.join()

    def _delete_temp_tenants_and_users(self):
        for user in self.users:
            user.delete()
@@ -303,6 +343,7 @@ class ScenarioRunner(object):
        results = self._run_scenario(cls, method_name, args,
                                     execution_type, config)

        self._cleanup_scenario(config.get("active_users", 1))
        self._delete_temp_tenants_and_users()

        return results
@@ -33,6 +33,7 @@ class FakeResource(object):
        self.status = "ACTIVE"
        self.manager = manager
        self.uuid = uuid.uuid4()
        self.id = self.uuid

    def __getattr__(self, name):
        # NOTE(msdubov): e.g. server.delete() -> manager.delete(server)
@@ -58,7 +59,9 @@ class FakeFailedServer(FakeResource):


class FakeImage(FakeResource):
    pass

    def __init__(self, manager=None):
        super(FakeImage, self).__init__(manager)


class FakeFloatingIP(FakeResource):
@@ -77,6 +80,34 @@ class FakeNetwork(FakeResource):
    pass


class FakeKeypair(FakeResource):
    pass


class FakeSecurityGroup(FakeResource):
    pass


class FakeVolume(FakeResource):
    pass


class FakeVolumeType(FakeResource):
    pass


class FakeVolumeTransfer(FakeResource):
    pass


class FakeVolumeSnapshot(FakeResource):
    pass


class FakeVolumeBackup(FakeResource):
    pass


class FakeManager(object):

    def __init__(self):
@@ -84,12 +115,11 @@ class FakeManager(object):
        self.cache = {}

    def get(self, resource):
        if resource == 'img_uuid':
            return 'img_uuid'
        return self.cache.get(resource.uuid, None)
        uuid = getattr(resource, 'uuid', None) or resource
        return self.cache.get(uuid, None)

    def delete(self, resource):
        cached = self.cache.get(resource.uuid, None)
        cached = self.get(resource)
        if cached is not None:
            del self.cache[cached.uuid]

@@ -106,8 +136,9 @@ class FakeManager(object):

class FakeServerManager(FakeManager):

    def __init__(self):
    def __init__(self, image_mgr=None):
        super(FakeServerManager, self).__init__()
        self.images = image_mgr or FakeImageManager()

    def get(self, resource):
        server = self.cache.get(resource.uuid, None)
@@ -125,7 +156,8 @@ class FakeServerManager(FakeManager):
        return self._create(name=name)

    def create_image(self, server, name):
        return "img_uuid"
        image = self.images.create()
        return image.uuid

    def add_floating_ip(self, server, fip):
        pass
@@ -143,7 +175,12 @@ class FakeFailedServerManager(FakeServerManager):
class FakeImageManager(FakeManager):

    def create(self):
        return FakeImage(self)
        return self._cache(FakeImage(self))

    def delete(self, image):
        cached = self.cache.get(image.uuid, None)
        if cached is not None:
            cached.status = "DELETED"


class FakeFloatingIPsManager(FakeManager):
@@ -166,22 +203,96 @@ class FakeNetworkManager(FakeManager):
        return self._cache(net)


class FakeKeypairManager(FakeManager):

    def create(self, name):
        kp = FakeKeypair(self)
        kp.name = name or kp.name
        return self._cache(kp)


class FakeSecurityGroupManager(FakeManager):

    def create(self, name):
        sg = FakeSecurityGroup(self)
        sg.name = name or sg.name
        return self._cache(sg)


class FakeUsersManager(FakeManager):

    def create(self, username, password, email, tenant_id):
        return FakeUser(self)


class FakeVolumeManager(FakeManager):

    def create(self, name=None):
        volume = FakeVolume(self)
        volume.name = name or volume.name
        return self._cache(volume)


class FakeVolumeTypeManager(FakeManager):

    def create(self, name):
        vol_type = FakeVolumeType(self)
        vol_type.name = name or vol_type.name
        return self._cache(vol_type)


class FakeVolumeTransferManager(FakeManager):

    def create(self, name):
        transfer = FakeVolumeTransfer(self)
        transfer.name = name or transfer.name
        return self._cache(transfer)


class FakeVolumeSnapshotManager(FakeManager):

    def create(self, name):
        snapshot = FakeVolumeSnapshot(self)
        snapshot.name = name or snapshot.name
        return self._cache(snapshot)


class FakeVolumeBackupManager(FakeManager):

    def create(self, name):
        backup = FakeVolumeBackup(self)
        backup.name = name or backup.name
        return self._cache(backup)


class FakeGlanceClient(object):

    def __init__(self, nova_client):
        self.images = nova_client.images


class FakeCinderClient(object):

    def __init__(self):
        self.volumes = FakeVolumeManager()
        self.volume_types = FakeVolumeTypeManager()
        self.transfers = FakeVolumeTransferManager()
        self.volume_snapshots = FakeVolumeSnapshotManager()
        self.backups = FakeVolumeBackupManager()


class FakeNovaClient(object):

    def __init__(self, failed_server_manager=False):
        if failed_server_manager:
            self.servers = FakeFailedServerManager()
        else:
            self.servers = FakeServerManager()
        self.images = FakeImageManager()
        if failed_server_manager:
            self.servers = FakeFailedServerManager(self.images)
        else:
            self.servers = FakeServerManager(self.images)
        self.floating_ips = FakeFloatingIPsManager()
        self.networks = FakeNetworkManager()
        self.keypairs = FakeKeypairManager()
        self.security_groups = FakeSecurityGroupManager()


class FakeKeystoneClient(object):
@@ -189,21 +300,40 @@ class FakeKeystoneClient(object):
    def __init__(self):
        self.tenants = FakeTenantsManager()
        self.users = FakeUsersManager()
        self.project_id = 'abc123'


class FakeClients(object):

    def __init__(self):
        self.nova = None
        self.glance = None
        self.keystone = None
        self.cinder = None

    def get_keystone_client(self):
        return FakeKeystoneClient()
        if self.keystone is not None:
            return self.keystone
        self.keystone = FakeKeystoneClient()
        return self.keystone

    def get_nova_client(self):
        return FakeNovaClient()
        if self.nova is not None:
            return self.nova
        self.nova = FakeNovaClient()
        return self.nova

    def get_glance_client(self):
        return "glance"
        if self.glance is not None:
            return self.glance
        self.glance = FakeGlanceClient(self.get_nova_client())
        return self.glance

    def get_cinder_client(self):
        return "cinder"
        if self.cinder is not None:
            return self.cinder
        self.cinder = FakeCinderClient()
        return self.cinder


class NovaScenarioTestCase(test.TestCase):
@@ -240,18 +370,17 @@ class NovaScenarioTestCase(test.TestCase):
        mock_osclients.Clients.return_value = fc
        fake_nova = FakeNovaClient()
        fc.get_nova_client = lambda: fake_nova
        fsm = FakeServerManager()
        fsm = FakeServerManager(fake_nova.images)
        fake_server = fsm.create("s1", "i1", 1)
        fsm.create = lambda name, iid, fid: fake_server
        fake_nova.servers = fsm
        fake_image_id = fsm.create_image(fake_server, 'img')
        fake_image = fake_nova.images.get(fake_image_id)
        fake_image = fsm.images.get(fake_image_id)
        fsm.create_image = lambda svr, name: fake_image
        temp_keys = ["username", "password", "tenant_name", "uri"]
        users_endpoints = [dict(zip(temp_keys, temp_keys))]
        utils.NovaScenario._clients = butils.\
            _create_openstack_clients(users_endpoints, temp_keys)[0]

        utils.utils = mock_rally_utils
        utils.bench_utils.get_from_manager = lambda: get_from_mgr
@@ -23,6 +23,7 @@ from rally import test
class ScenarioTestCase(test.TestCase):

    def test_register(self):
        base.Scenario.registred = False
        with mock.patch("rally.benchmark.base.utils") as mock_utils:
            base.Scenario.register()
            base.Scenario.register()
@@ -53,6 +53,21 @@ class FakeTimer(rally_utils.Timer):
        return 10


class MockedPool(object):

    def __init__(self, concurrent=1):
        pass

    def close(self):
        pass

    def join(self):
        pass

    def apply_async(self, func, args=()):
        func(*args)


class ScenarioTestCase(test.TestCase):

    def setUp(self):
@@ -125,7 +140,9 @@ class ScenarioTestCase(test.TestCase):
                                              "timeout": 0.01})
        self.assertEqual(len(results), times)
        for r in results:
            self.assertEqual(r['time'], 0.01)
            #NOTE(boden): parrallelized tests can't ensure exactly 0.01
            if r['time'] < 0.01:
                self.assertFalse(True, "Premature timeout")
            self.assertEqual(r['error'][0],
                             str(multiprocessing.TimeoutError))

@@ -321,6 +338,83 @@ class ScenarioTestCase(test.TestCase):
        ]
        self.assertEqual(FakeScenario.mock_calls, expected)

    @mock.patch("rally.benchmark.utils._create_openstack_clients")
    @mock.patch("rally.benchmark.utils.base")
    @mock.patch("rally.benchmark.utils.osclients")
    @mock.patch("multiprocessing.Pool")
    def test_generic_cleanup(self, mock_pool, mock_osclients,
                             mock_base, mock_clients):
        FakeScenario = mock.MagicMock()
        FakeScenario.init = mock.MagicMock(return_value={})

        mock_cms = [test_utils.FakeClients(), test_utils.FakeClients(),
                    test_utils.FakeClients()]
        clients = [
            dict((
                ("nova", cl.get_nova_client()),
                ("keystone", cl.get_keystone_client()),
                ("glance", cl.get_glance_client()),
                ("cinder", cl.get_cinder_client())
            )) for cl in mock_cms
        ]
        mock_clients.return_value = clients

        runner = utils.ScenarioRunner(mock.MagicMock(), self.fake_kw)
        runner._run_scenario = mock.MagicMock(return_value="result")
        runner._create_temp_tenants_and_users = mock.MagicMock(
            return_value=[])
        runner._delete_temp_tenants_and_users = mock.MagicMock()

        mock_base.Scenario.get_by_name = \
            mock.MagicMock(return_value=FakeScenario)

        for index in range(len(clients)):
            client = clients[index]
            nova = client["nova"]
            cinder = client["cinder"]
            for count in range(3):
                uid = index + count
                img = nova.images.create()
                nova.servers.create("svr-%s" % (uid), img.uuid, index)
                nova.keypairs.create("keypair-%s" % (uid))
                nova.security_groups.create("secgroup-%s" % (uid))
                nova.networks.create("net-%s" % (uid))
                cinder.volumes.create("vol-%s" % (uid))
                cinder.volume_types.create("voltype-%s" % (uid))
                cinder.transfers.create("voltransfer-%s" % (uid))
                cinder.volume_snapshots.create("snap-%s" % (uid))
                cinder.backups.create("backup-%s" % (uid))

        mock_pool.return_value = MockedPool()

        runner.run("FakeScenario.do_it",
                   {"args": {"a": 1}, "init": {"arg": 1},
                    "config": {"timeout": 1, "times": 2, "active_users": 3,
                               "tenants": 5, "users_per_tenant": 2}})

        def _assert_purged(manager, resource_type):
            resources = manager.list()
            self.assertEqual([], resources, "%s not purged: %s" %
                             (resource_type, resources))

        for client in clients:
            nova = client["nova"]
            cinder = client["cinder"]
            _assert_purged(nova.servers, "servers")
            _assert_purged(nova.keypairs, "key pairs")
            _assert_purged(nova.security_groups, "security groups")
            _assert_purged(nova.networks, "networks")

            _assert_purged(cinder.volumes, "volumes")
            _assert_purged(cinder.volume_types, "volume types")
            _assert_purged(cinder.backups, "volume backups")
            _assert_purged(cinder.transfers, "volume transfers")
            _assert_purged(cinder.volume_snapshots, "volume snapshots")

            for image in nova.images.list():
                self.assertEqual("DELETED", image.status,
                                 "image not purged: %s" % (image))


def test_dummy_1():
    pass
@@ -65,7 +65,28 @@ class FakeEngine(deploy.EngineFactory):
        self.cleanuped = True


class EngineMixIn(object):
    def deploy(self):
        pass

    def cleanup(self):
        pass


class EngineFake1(EngineMixIn, deploy.EngineFactory):
    pass


class EngineFake2(EngineMixIn, deploy.EngineFactory):
    pass


class EngineFake3(EngineFake2):
    pass


class EngineFactoryTestCase(test.TestCase):
    FAKE_ENGINES = [EngineFake1, EngineFake2, EngineFake3]

    @mock.patch.object(FakeDeployment, 'update_status')
    def test_get_engine_not_found(self, mock_update_status):
@@ -187,28 +208,9 @@ class EngineFactoryTestCase(test.TestCase):
        self.assertFalse(engine.cleanuped)
        self.assertFalse(engine.deployed)

    def _create_fake_engines(self):
        class EngineMixIn(object):
            def deploy(self):
                pass

            def cleanup(self):
                pass

        class EngineFake1(EngineMixIn, deploy.EngineFactory):
            pass

        class EngineFake2(EngineMixIn, deploy.EngineFactory):
            pass

        class EngineFake3(EngineFake2):
            pass

        return [EngineFake1, EngineFake2, EngineFake3]

    def test_get_engine(self):
        deployment = make_fake_deployment()
        engines = self._create_fake_engines()
        engines = EngineFactoryTestCase.FAKE_ENGINES
        for e in engines:
            engine_inst = deploy.EngineFactory.get_engine(e.__name__,
                                                          deployment)
@@ -216,7 +218,7 @@ class EngineFactoryTestCase(test.TestCase):
            self.assertEqual(str(type(engine_inst)), str(e))

    def test_get_available_engines(self):
        engines = set([e.__name__ for e in self._create_fake_engines()])
        engines = set([e.__name__ for e in EngineFactoryTestCase.FAKE_ENGINES])
        real_engines = set(deploy.EngineFactory.get_available_engines())
        self.assertEqual(engines & real_engines, engines)