Add generic int-test classes

The int-tests in Trove are very MySQL specific, which makes it difficult
to reuse code for other datastores.  This changeset breaks them down
into 'groups' and 'runners.'  Runners can be over-ridden to add
datastore specific handling/tests.  This should allow most generic
code to be reused across datastores, while also providing for datastore
specific enhancements.

Runner implementations are stored in a new package
'trove.tests.scenario.runners'.  A datastore-specific implementation can
be added to the appropriate runner module file. Its name has to match
'PrefixBaseRunnerClassName' pattern, where 'BaseRunnerClassName' is the
name of the default runner and 'Prefix' is the datastore's manager
name with the first letter capitalized.

Example:
    Given the default implementation for negative cluster tests in
    'trove.tests.api.runners.NegativeClusterActionsGroup'.  One can
    provide a custom implementation for MongoDB (with manager mongodb)
    in 'trove.tests.api.runners.MongodbNegativeClusterActionsRunner'

This initial changeset adds tests for basic actions on instances
and clusters.  Some basic replication tests were also migrated.

The concept of a helper class for datastore specific activies
was also created.  This makes it easy to have tests use standard
methods of adding data and verifying that the datastore behaves
as it should.

Vertica was refactored to use the new infrastructure.

Running the tests can be accomplished by specifying one of the
new groups in int-tests (see int_tests.py for the complete list):

./redstack kick-start mongodb
./redstack int-tests --group=instance_actions --group=cluster
or
./redstack int-tests --group=mongodb_supported (to run all
tests supported by the MongoDB datastore)

As with the original int-tests, the datastore used is the one
referenced in test configuration file (test.conf) under the
key dbaas_datastore.  This key is automatically set when
kick-start is run.

Additional Notes:

Also temporarily disabled volume size check in
instances tests.
It is supposed to assert that the used space on the
Trove volume is less that the size of the volume.
It however often fails because 'used' > 'size'.
From inspection of the instance it appears that the reported
'used' space is from the root volume instead of the
attached Trove volume. Plus it sometimes returns int instead of float.

Change-Id: I34fb974a32dc1b457026f5b9d98e20d1c7219009
Authored-By: Petr Malik <pmalik@tesora.com>
Co-Authored-By: Peter Stachowski <peter@tesora.com>
This commit is contained in:
Peter Stachowski 2015-08-07 18:02:16 -04:00
parent 7cf297cd22
commit 16d91d9838
26 changed files with 1619 additions and 356 deletions

View File

@ -28,7 +28,7 @@
"trove_max_instances_per_user": 55,
"trove_max_volumes_per_user": 100,
"use_reaper":false,
"root_removed_from_instance_api": true,
"root_removed_from_instance_api": true,
"root_timestamp_disabled": false,
"openvz_disabled": false,
"management_api_disabled": true

View File

@ -175,14 +175,8 @@ class MethodInspector(object):
return "%s %s" % (self._func.__name__, args_str)
def poll_until(retriever, condition=lambda value: value,
sleep_time=1, time_out=None):
"""Retrieves object until it passes condition, then returns it.
If time_out_limit is passed in, PollTimeOut will be raised once that
amount of time is eclipsed.
"""
def build_polling_task(retriever, condition=lambda value: value,
sleep_time=1, time_out=None):
start_time = time.time()
def poll_and_check():
@ -192,11 +186,21 @@ def poll_until(retriever, condition=lambda value: value,
if time_out is not None and time.time() - start_time > time_out:
raise exception.PollTimeOut
lc = loopingcall.FixedIntervalLoopingCall(
f=poll_and_check).start(
sleep_time, True)
return loopingcall.FixedIntervalLoopingCall(
f=poll_and_check).start(sleep_time, True)
return lc.wait()
def poll_until(retriever, condition=lambda value: value,
sleep_time=1, time_out=None):
"""Retrieves object until it passes condition, then returns it.
If time_out_limit is passed in, PollTimeOut will be raised once that
amount of time is eclipsed.
"""
return build_polling_task(retriever, condition=condition,
sleep_time=sleep_time, time_out=time_out).wait()
# Copied from nova.api.openstack.common in the old code.

View File

@ -105,7 +105,8 @@ class BaseDbStatus(object):
def set_status(self, status):
"""Use conductor to update the DB app status."""
LOG.debug("Casting set_status message to conductor.")
LOG.debug("Casting set_status message to conductor (status is '%s')." %
status.description)
context = trove_context.TroveContext()
heartbeat = {

View File

@ -622,7 +622,7 @@ class WaitForConfigurationInstanceToFinish(ConfigurationsTestBase):
@test(depends_on=[test_instance_with_configuration_active])
@time_out(30)
def test_get_configuration_details_from_instance_validation(self):
# validate that the configuraiton was applied correctly to the instance
# validate that the configuration was applied correctly to the instance
inst = instance_info.dbaas.instances.get(configuration_instance.id)
configuration_id = inst.configuration['id']
assert_not_equal(None, inst.configuration['id'])

View File

@ -141,6 +141,7 @@ class InstanceTestInfo(object):
# existing.
instance_info = InstanceTestInfo()
dbaas = None # Rich client used throughout this test.
dbaas_admin = None # Same as above, with admin privs.
ROOT_ON_CREATE = CONFIG.get('root_on_create', False)
VOLUME_SUPPORT = CONFIG.get('trove_volume_support', False)
@ -283,7 +284,8 @@ class CreateInstanceQuotaTest(unittest.TestCase):
if VOLUME_SUPPORT:
assert_equal(CONFIG.trove_max_volumes_per_user,
verify_quota['volumes'])
self.test_info.volume = {'size': 1}
self.test_info.volume = {'size':
CONFIG.get('trove_volume_size', 1)}
self.test_info.name = "too_many_instances"
assert_raises(exceptions.OverLimit,
@ -344,7 +346,7 @@ class CreateInstanceFail(object):
def test_create_with_bad_availability_zone(self):
instance_name = "instance-failure-with-bad-ephemeral"
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
databases = []
@ -363,7 +365,7 @@ class CreateInstanceFail(object):
def test_create_with_bad_nics(self):
instance_name = "instance-failure-with-bad-nics"
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
databases = []
@ -383,7 +385,7 @@ class CreateInstanceFail(object):
instance_name = "instance-failure-with-empty-flavor"
databases = []
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
assert_raises(exceptions.BadRequest, dbaas.instances.create,
@ -445,7 +447,7 @@ class CreateInstanceFail(object):
@test
def test_create_failure_with_no_name(self):
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = ""
@ -458,7 +460,7 @@ class CreateInstanceFail(object):
@test
def test_create_failure_with_spaces_for_name(self):
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = " "
@ -491,7 +493,7 @@ class CreateInstanceFail(object):
if not FAKE:
raise SkipTest("This test only for fake mode.")
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = "datastore_default_notfound"
@ -515,7 +517,7 @@ class CreateInstanceFail(object):
@test
def test_create_failure_with_datastore_default_version_notfound(self):
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = "datastore_default_version_notfound"
@ -536,7 +538,7 @@ class CreateInstanceFail(object):
@test
def test_create_failure_with_datastore_notfound(self):
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = "datastore_notfound"
@ -557,7 +559,7 @@ class CreateInstanceFail(object):
@test
def test_create_failure_with_datastore_version_notfound(self):
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = "datastore_version_notfound"
@ -580,7 +582,7 @@ class CreateInstanceFail(object):
@test
def test_create_failure_with_datastore_version_inactive(self):
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
instance_name = "datastore_version_inactive"
@ -650,7 +652,7 @@ class CreateInstance(object):
instance_info.dbaas_datastore = CONFIG.dbaas_datastore
instance_info.dbaas_datastore_version = CONFIG.dbaas_datastore_version
if VOLUME_SUPPORT:
instance_info.volume = {'size': 1}
instance_info.volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
instance_info.volume = None
@ -709,6 +711,7 @@ class CreateInstance(object):
groups=[GROUP, tests.INSTANCES],
runs_after_groups=[tests.PRE_INSTANCES])
class CreateInstanceFlavors(object):
def _result_is_active(self):
instance = dbaas.instances.get(self.result.id)
if instance.status == "ACTIVE":
@ -736,7 +739,7 @@ class CreateInstanceFlavors(object):
instance_name = "instance-with-flavor-%s" % flavor_id
databases = []
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
self.result = dbaas.instances.create(instance_name, flavor_id, volume,
@ -755,6 +758,7 @@ class CreateInstanceFlavors(object):
@test(depends_on_classes=[InstanceSetup], groups=[GROUP_NEUTRON])
class CreateInstanceWithNeutron(unittest.TestCase):
@time_out(TIMEOUT_INSTANCE_CREATE)
def setUp(self):
if not CONFIG.values.get('neutron_enabled'):
@ -771,7 +775,7 @@ class CreateInstanceWithNeutron(unittest.TestCase):
databases = []
self.default_cidr = CONFIG.values.get('shared_network_subnet', None)
if VOLUME_SUPPORT:
volume = {'size': 1}
volume = {'size': CONFIG.get('trove_volume_size', 1)}
else:
volume = None
@ -1185,10 +1189,15 @@ class TestInstanceListing(object):
if create_new_instance():
assert_equal(instance_info.volume['size'], instance.volume['size'])
else:
assert_true(isinstance(instance_info.volume['size'], float))
# FIXME(peterstac): Sometimes this returns as an int - is that ok?
assert_true(type(instance_info.volume['size']) in [int, float])
if create_new_instance():
assert_true(0.0 < instance.volume['used']
< instance.volume['size'])
# FIXME(pmalik): Keeps failing because 'used' > 'size'.
# It seems like the reported 'used' space is from the root volume
# instead of the attached Trove volume.
# assert_true(0.0 < instance.volume['used'] <
# instance.volume['size'])
pass
@test(enabled=EPHEMERAL_SUPPORT)
def test_ephemeral_mount(self):
@ -1371,6 +1380,7 @@ class DeleteInstance(object):
runs_after=[DeleteInstance],
groups=[GROUP, GROUP_STOP, 'dbaas.usage'])
class AfterDeleteChecks(object):
@test
def test_instance_delete_event_sent(self):
deleted_at = None
@ -1591,6 +1601,7 @@ class CheckInstance(AttrCheck):
@test(groups=[GROUP])
class BadInstanceStatusBug():
@before_class()
def setUp(self):
self.instances = []

View File

@ -1,307 +0,0 @@
# Copyright [2015] Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for Vertica datastore.
APIs tested for Vertica are:
1. create
2. restart
3. resize-volume
4. resize-instance
5. delete
6. cluster-create
7. cluster-delete
"""
from proboscis import asserts
from proboscis.decorators import before_class
from proboscis import SkipTest
from proboscis import test
from troveclient.compat import exceptions
from trove.common.utils import poll_until
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.config import CONFIG
from trove.tests.util.check import TypeCheck
from trove.tests.util import create_dbaas_client
VERTICA_GROUP = "dbaas.api.vertica"
TIMEOUT = 2300
SLEEP_TIME = 60
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[VERTICA_GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class VerticaTest(object):
"""Tests Vertica Datastore Features."""
@before_class
def setUp(self):
self.instance = instance_info
self.rd_client = create_dbaas_client(self.instance.user)
self.report = CONFIG.get_report()
def _find_status(self, rd_client, instance_id, expected_status):
"""Tracks instance status, until it gets to expected_status."""
instance = rd_client.instances.get(instance_id)
self.report.log("Instance info %s." % instance._info)
if instance.status == expected_status:
self.report.log("Instance: %s is ready." % instance_id)
return True
else:
return False
@test
def test_instance_restart(self):
"""Tests the restart API."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.restart(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("REBOOT", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Restarted Instance: %s." % self.instance.id)
@test(depends_on=[test_instance_restart])
def test_instance_resize_volume(self):
"""Tests the resize volume API."""
old_volume_size = int(instance_info.volume['size'])
new_volume_size = old_volume_size + 1
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_volume(self.instance.id,
new_volume_size)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(instance.volume['size'], new_volume_size)
self.report.log("Resized Volume for Instance ID: %s to %s." % (
self.instance.id, new_volume_size))
@test(depends_on=[test_instance_resize_volume])
def test_instance_resize_flavor(self):
"""Tests the resize instance/flavor API."""
flavor_name = CONFIG.values.get('instance_bigger_flavor_name',
'm1.medium')
flavors = self.instance.dbaas.find_flavors_by_name(flavor_name)
new_flavor = flavors[0]
asserts.assert_true(new_flavor is not None,
"Flavor '%s' not found!" % flavor_name)
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.resize_instance(self.instance.id,
new_flavor.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("RESIZE", test_instance.status)
poll_until(lambda: self._find_status(self.rd_client,
self.instance.id, "ACTIVE"),
sleep_time=SLEEP_TIME, time_out=TIMEOUT)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal(int(test_instance.flavor['id']), new_flavor.id)
self.report.log("Resized Flavor for Instance ID: %s to %s." % (
self.instance.id, new_flavor.id))
@test(depends_on=[test_instance_resize_flavor])
def test_instance_delete(self):
"""Tests the instance delete."""
if not getattr(self, 'instance', None):
raise SkipTest(
"Skipping this test since instance is not available.")
self.rd_client = create_dbaas_client(self.instance.user)
self.rd_client.instances.delete(self.instance.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
test_instance = self.rd_client.instances.get(self.instance.id)
asserts.assert_equal("SHUTDOWN", test_instance.status)
def _poll():
try:
instance = self.rd_client.instances.get(self.instance.id)
self.report.log("Instance info %s" % instance._info)
asserts.assert_equal("SHUTDOWN", instance.status)
return False
except exceptions.NotFound:
self.report.log("Instance has gone.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted Instance ID: %s " % self.instance.id)
@test(depends_on=[test_instance_delete])
def test_create_cluster_with_fewer_instances(self):
invalid_request_body_with_few_instances = [
{"flavorRef": 2, "volume": {"size": 1}}]
self.rd_client = create_dbaas_client(self.instance.user)
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_few_instances)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_fewer_instances])
def test_create_cluster_with_different_flavors(self):
invalid_request_body_with_different_flavors = [
{"flavorRef": 3, "volume": {"size": 1}},
{"flavorRef": 4, "volume": {"size": 1}}]
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_different_flavors)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_different_flavors])
def test_create_cluster_with_different_volumes(self):
invalid_request_body_with_different_volumes = [
{"flavorRef": 2, "volume": {"size": 2}},
{"flavorRef": 2, "volume": {"size": 3}}]
asserts.assert_raises(
exceptions.BadRequest,
self.rd_client.clusters.create,
"test_cluster",
self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=invalid_request_body_with_different_volumes)
asserts.assert_equal(400, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_with_different_volumes])
def test_create_cluster_successfuly(self):
valid_request_body = [
{"flavorRef": self.instance.dbaas_flavor_href,
"volume": self.instance.volume},
{"flavorRef": self.instance.dbaas_flavor_href,
"volume": self.instance.volume}]
self.cluster = self.rd_client.clusters.create(
"test_cluster", self.instance.dbaas_datastore,
self.instance.dbaas_datastore_version,
instances=valid_request_body)
with TypeCheck('Cluster', self.cluster) as check:
check.has_field("id", basestring)
check.has_field("name", basestring)
check.has_field("datastore", dict)
check.has_field("instances", list)
check.has_field("links", list)
check.has_field("created", unicode)
check.has_field("updated", unicode)
for instance in self.cluster.instances:
isinstance(instance, dict)
asserts.assert_is_not_none(instance['id'])
asserts.assert_is_not_none(instance['links'])
asserts.assert_is_not_none(instance['name'])
asserts.assert_equal(200, self.rd_client.last_http_code)
@test(depends_on=[test_create_cluster_successfuly])
def test_wait_until_cluster_is_active(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
def result_is_active():
cluster = self.rd_client.clusters.get(self.cluster.id)
cluster_instances = [
self.rd_client.instances.get(instance['id'])
for instance in cluster.instances]
self.report.log("Cluster info %s." % cluster._info)
self.report.log("Cluster instances info %s." % cluster_instances)
if cluster.task['name'] == "NONE":
if ["ERROR"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster provisioning failed.")
asserts.fail("Cluster provisioning failed.")
if ["ACTIVE"] * len(cluster_instances) == [
str(instance.status) for instance in cluster_instances]:
self.report.log("Cluster is ready.")
return True
else:
asserts.assert_not_equal(
["ERROR"] * len(cluster_instances),
[instance.status
for instance in cluster_instances])
self.report.log("Continue polling, cluster is not ready yet.")
poll_until(result_is_active, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Created cluster, ID = %s." % self.cluster.id)
@test(depends_on=[test_wait_until_cluster_is_active])
def test_cluster_delete(self):
if not getattr(self, 'cluster', None):
raise SkipTest(
"Skipping this test since cluster is not available.")
self.rd_client.clusters.delete(self.cluster.id)
asserts.assert_equal(202, self.rd_client.last_http_code)
def _poll():
try:
cluster = self.rd_client.clusters.get(
self.cluster.id)
self.report.log("Cluster info %s" % cluster._info)
asserts.assert_equal("DELETING", cluster.task['name'])
return False
except exceptions.NotFound:
self.report.log("Cluster is not available.")
asserts.assert_equal(404, self.rd_client.last_http_code)
return True
poll_until(_poll, sleep_time=SLEEP_TIME, time_out=TIMEOUT)
self.report.log("Deleted cluster: %s." % self.cluster.id)

View File

@ -87,6 +87,7 @@ class TestConfig(object):
"in_proc_server": True,
"report_directory": os.environ.get("REPORT_DIRECTORY", None),
"trove_volume_support": True,
"trove_volume_size": 1,
"trove_max_volumes_per_user": 100,
"usage_endpoint": USAGE_ENDPOINT,
"root_on_create": False,

View File

@ -31,12 +31,34 @@ from trove.tests.api import root
from trove.tests.api import user_access
from trove.tests.api import users
from trove.tests.api import versions
from trove.tests.api import vertica
from trove.tests.scenario.groups import cluster_actions_group
from trove.tests.scenario.groups import instance_actions_group
from trove.tests.scenario.groups import instance_delete_group
from trove.tests.scenario.groups import negative_cluster_actions_group
from trove.tests.scenario.groups import replication_group
GROUP_SERVICES_INITIALIZE = "services.initialize"
def build_group(*groups):
def merge(collection, *items):
for item in items:
if isinstance(item, list):
merge(collection, *item)
else:
if item not in collection:
collection.append(item)
out = []
merge(out, *groups)
return out
def register(datastores, *test_groups):
proboscis.register(groups=build_group(datastores),
depends_on_groups=build_group(*test_groups))
black_box_groups = [
flavors.GROUP,
users.GROUP,
@ -85,25 +107,39 @@ black_box_mgmt_groups = [
proboscis.register(groups=["blackbox_mgmt"],
depends_on_groups=black_box_mgmt_groups)
# Datastores groups for int-tests
datastore_group = [
#
# Group designations for datastore agnostic int-tests
#
initial_groups = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
instance_delete_group.GROUP
]
proboscis.register(groups=["cassandra", "couchbase", "mongodb", "postgresql",
"redis"],
depends_on_groups=datastore_group)
instance_actions_groups = list(initial_groups)
instance_actions_groups.extend([instance_actions_group.GROUP])
# Vertica int-tests
vertica_group = [
GROUP_SERVICES_INITIALIZE,
flavors.GROUP,
versions.GROUP,
instances.GROUP_START_SIMPLE,
instances.GROUP_QUOTAS,
vertica.VERTICA_GROUP,
]
proboscis.register(groups=["vertica"],
depends_on_groups=vertica_group)
cluster_actions_groups = list(initial_groups)
cluster_actions_groups.extend([cluster_actions_group.GROUP,
negative_cluster_actions_group.GROUP])
replication_groups = list(initial_groups)
replication_groups.extend([replication_group.GROUP])
# Module based groups
register(["instance_actions"], instance_actions_groups)
register(["cluster"], cluster_actions_groups)
register(["replication"], replication_groups)
# Datastore based groups - these should contain all functionality
# currently supported by the datastore
register(["cassandra_supported"], instance_actions_groups)
register(["couchbase_supported"], instance_actions_groups)
register(["postgresql_supported"], instance_actions_groups)
register(["mongodb_supported"], instance_actions_groups,
cluster_actions_groups)
register(["mysql_supported"], instance_actions_groups, replication_groups)
register(["redis_supported"], instance_actions_groups)
register(["vertica_supported"], instance_actions_groups,
cluster_actions_groups)

View File

View File

View File

@ -0,0 +1,40 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.cluster_actions_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class ClusterActionsGroup(TestGroup):
def __init__(self):
super(ClusterActionsGroup, self).__init__(
'cluster_actions_runners', 'ClusterActionsRunner')
@test
def cluster_create(self):
self.test_runner.run_cluster_create()
@test(depends_on=[cluster_create])
def cluster_delete(self):
self.test_runner.run_cluster_delete()

View File

@ -0,0 +1,44 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.instance_actions_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class InstanceActionsGroup(TestGroup):
def __init__(self):
super(InstanceActionsGroup, self).__init__(
'instance_actions_runners', 'InstanceActionsRunner')
@test
def instance_restart(self):
self.test_runner.run_instance_restart()
@test(depends_on=[instance_restart])
def instance_resize_volume(self):
self.test_runner.run_instance_resize_volume()
@test(depends_on=[instance_resize_volume])
def instance_resize_flavor(self):
self.test_runner.run_instance_resize_flavor()

View File

@ -0,0 +1,38 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.scenario.groups import instance_actions_group
from trove.tests.scenario.groups import replication_group
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.instance_delete_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after_groups=[replication_group.GROUP,
instance_actions_group.GROUP])
class InstanceDeleteGroup(TestGroup):
def __init__(self):
super(InstanceDeleteGroup, self).__init__(
'instance_delete_runners', 'InstanceDeleteRunner')
@test
def instance_delete(self):
self.test_runner.run_instance_delete()

View File

@ -0,0 +1,39 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.negative_cluster_actions_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class NegativeClusterActionsGroup(TestGroup):
def __init__(self):
super(NegativeClusterActionsGroup, self).__init__(
'negative_cluster_actions_runners', 'NegativeClusterActionsRunner')
@test
def create_constrained_size_cluster(self):
self.test_runner.run_create_constrained_size_cluster()
@test
def create_heterogeneous_cluster(self):
self.test_runner.run_create_heterogeneous_cluster()

View File

@ -0,0 +1,78 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.api.instances import GROUP_START_SIMPLE
from trove.tests.api.instances import WaitForGuestInstallationToFinish
from trove.tests.scenario.groups.test_group import TestGroup
GROUP = "scenario.replication_group"
@test(depends_on_groups=[GROUP_START_SIMPLE], groups=[GROUP],
runs_after=[WaitForGuestInstallationToFinish])
class ReplicationGroup(TestGroup):
def __init__(self):
super(ReplicationGroup, self).__init__(
'replication_runners', 'ReplicationRunner')
@test
def add_data_for_replication(self):
self.test_runner.run_add_data_for_replication()
@test(runs_after=[add_data_for_replication])
def create_replicas(self):
self.test_runner.run_create_replicas()
@test(depends_on=[create_replicas])
def add_data_to_replicate(self):
self.test_runner.run_add_data_to_replicate()
@test(depends_on=[add_data_to_replicate])
def verify_replicated_data(self):
self.test_runner.run_verify_replicated_data()
@test(depends_on=[add_data_to_replicate])
def remove_replicated_data(self):
self.test_runner.run_remove_replicated_data()
@test(depends_on=[create_replicas],
runs_after=[remove_replicated_data])
def promote_master(self):
self.test_runner.run_promote_master()
@test(depends_on=[promote_master])
def eject_replica(self):
self.test_runner.run_eject_replica()
@test(depends_on=[eject_replica])
def eject_valid_master(self):
self.test_runner.run_eject_valid_master()
@test(depends_on=[eject_valid_master])
def delete_valid_master(self):
self.test_runner.run_delete_valid_master()
@test(depends_on=[delete_valid_master])
def swap_replica_master(self):
self.test_runner.run_swap_replica_master()
# TODO(peterstac): Add more tests
@test(depends_on=[swap_replica_master])
def delete_replica_set(self):
self.run_delete_replica_set()

View File

@ -0,0 +1,80 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from trove.guestagent.strategy import Strategy
from trove.tests.config import CONFIG
@six.add_metaclass(abc.ABCMeta)
class TestGroup(object):
TEST_RUNNERS_NS = 'trove.tests.scenario.runners'
TEST_HELPERS_NS = 'trove.tests.scenario.helpers'
TEST_HELPER_MODULE_NAME = 'test_helper'
TEST_HELPER_BASE_NAME = 'TestHelper'
def __init__(self, runner_module_name, runner_base_name, *args, **kwargs):
class_prefix = self._get_test_datastore()
runner_cls = self._load_dynamic_class(
runner_module_name, class_prefix, runner_base_name,
self.TEST_RUNNERS_NS)
self._test_runner = runner_cls(*args, **kwargs)
helper_cls = self._load_dynamic_class(
self.TEST_HELPER_MODULE_NAME, class_prefix,
self.TEST_HELPER_BASE_NAME, self.TEST_HELPERS_NS)
self._test_runner._test_helper = helper_cls(self._build_class_name(
class_prefix, self.TEST_HELPER_BASE_NAME, strip_test=True))
def _get_test_datastore(self):
return CONFIG.dbaas_datastore
def _load_dynamic_class(self, module_name, class_prefix, base_name,
namespace):
"""Try to load a datastore specific class if it exists; use the
default otherwise.
"""
try:
# This is for overridden Runner classes
impl = self._build_class_path(module_name, class_prefix, base_name)
return Strategy.get_strategy(impl, namespace)
except ImportError:
pass
try:
# This is for overridden Helper classes
module = module_name.replace('test', class_prefix.lower())
impl = self._build_class_path(module, class_prefix, base_name,
strip_test=True)
return Strategy.get_strategy(impl, namespace)
except ImportError:
# Just import the base class
impl = self._build_class_path(module_name, '', base_name)
return Strategy.get_strategy(impl, namespace)
def _build_class_path(self, module_name, class_prefix, class_base,
strip_test=False):
class_name = self._build_class_name(class_prefix, class_base,
strip_test)
return '%s.%s' % (module_name, class_name)
def _build_class_name(self, class_prefix, base_name, strip_test=False):
base = (base_name.replace('Test', '') if strip_test else base_name)
return '%s%s' % (class_prefix.capitalize(), base)
@property
def test_runner(self):
return self._test_runner

View File

View File

@ -0,0 +1,139 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import redis
from trove.tests.scenario.helpers.test_helper import TestHelper
from trove.tests.scenario.runners.test_runners import TestRunner
class RedisHelper(TestHelper):
def __init__(self, expected_override_name):
super(RedisHelper, self).__init__(expected_override_name)
self.key_pattern = 'user:%s'
self.value_pattern = 'id:%s'
self.label_value = 'value_set'
self.tiny_data_label = 'tiny'
self.tiny_data_start = 1
self.tiny_data_size = 100
self.small_data_label = 'small'
self.small_data_start = 1000
self.small_data_size = 1000
self.large_data_label = 'large'
self.large_data_start = 100000
self.large_data_size = 100000
def create_client(self, host, *args, **kwargs):
# Redis is set up without a password at the moment.
password = None
client = redis.StrictRedis(password=password, host=host)
return client
# Add data overrides
def add_tiny_data(self, host, *args, **kwargs):
self._add_data(self.tiny_data_label, self.tiny_data_start,
self.tiny_data_size, host, *args, **kwargs)
def add_small_data(self, host, *args, **kwargs):
self._add_data(self.small_data_label, self.small_data_start,
self.small_data_size, host, *args, **kwargs)
def add_large_data(self, host, *args, **kwargs):
self._add_data(self.large_data_label, self.large_data_start,
self.large_data_size, host, *args, **kwargs)
def _add_data(self, data_label, data_start, data_size, host,
*args, **kwargs):
"""Add the actual data here."""
client = self.get_client(host, *args, **kwargs)
test_set = client.get(data_label)
if not test_set:
for num in range(data_start, data_start + data_size):
client.set(self.key_pattern % str(num),
self.value_pattern % str(num))
# now that the data is there, add the label
client.set(data_label, self.label_value)
# Remove data overrides
def remove_tiny_data(self, host, *args, **kwargs):
self._remove_data(self.tiny_data_label, self.tiny_data_start,
self.tiny_data_size, host, *args, **kwargs)
def remove_small_data(self, host, *args, **kwargs):
self._remove_data(self.small_data_label, self.small_data_start,
self.small_data_size, host, *args, **kwargs)
def remove_large_data(self, host, *args, **kwargs):
self._remove_data(self.large_data_label, self.large_data_start,
self.large_data_size, host, *args, **kwargs)
def _remove_data(self, data_label, data_start, data_size, host,
*args, **kwargs):
"""Remove the actual data here."""
client = self.get_client(host, *args, **kwargs)
test_set = client.get(data_label)
if test_set:
for num in range(data_start, data_start + data_size):
client.set(self.key_pattern % str(num), None)
# now that the data is gone, remove the label
client.set(data_label, None)
# Verify data overrides
def verify_tiny_data(self, host, *args, **kwargs):
self._verify_data(self.tiny_data_label, self.tiny_data_start,
self.tiny_data_size, host, *args, **kwargs)
def verify_small_data(self, host, *args, **kwargs):
self._verify_data(self.small_data_label, self.small_data_start,
self.small_data_size, host, *args, **kwargs)
def verify_large_data(self, host, *args, **kwargs):
self._verify_data(self.large_data_label, self.large_data_start,
self.large_data_size, host, *args, **kwargs)
def _verify_data(self, data_label, data_start, data_size, host,
*args, **kwargs):
"""Verify the actual data here."""
client = self.get_client(host, *args, **kwargs)
# make sure the data is there - tests edge cases and a random one
self._verify_data_point(client, data_label, self.label_value)
midway_num = data_start + int(data_size / 2)
random_num = random.randint(data_start + 2,
data_start + data_size - 3)
for num in [data_start,
data_start + 1,
midway_num,
random_num,
data_start + data_size - 2,
data_start + data_size - 1]:
self._verify_data_point(client,
self.key_pattern % num,
self.value_pattern % num)
# negative tests
for num in [data_start - 1,
data_start + data_size]:
self._verify_data_point(client, self.key_pattern % num, None)
def _verify_data_point(self, client, key, expected_value):
value = client.get(key)
TestRunner.assert_equal(expected_value, value,
"Unexpected value '%s' returned from Redis "
"key '%s'" % (value, key))

View File

@ -0,0 +1,176 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from enum import Enum
import inspect
from proboscis import SkipTest
class DataType(Enum):
"""
Represent the type of data to add to a datastore. This allows for
multiple 'states' of data that can be verified after actions are
performed by Trove.
"""
# very tiny amount of data, useful for testing replication
# propagation, etc.
tiny = 1
# small amount of data (this can be added to each instance
# after creation, for example).
small = 2
# large data, enough to make creating a backup take 20s or more.
large = 3
class TestHelper(object):
"""
Base class for all 'Helper' classes.
The Helper classes are designed to do datastore specific work
that can be used by multiple runner classes. Things like adding
data to datastores and verifying data or internal database states,
etc. should be handled by these classes.
"""
# Define the actions that can be done on each DataType
FN_ACTION_ADD = 'add'
FN_ACTION_REMOVE = 'remove'
FN_ACTION_VERIFY = 'verify'
FN_ACTIONS = [FN_ACTION_ADD, FN_ACTION_REMOVE, FN_ACTION_VERIFY]
def __init__(self, expected_override_name):
"""Initialize the helper class by creating a number of stub
functions that each datastore specific class can chose to
override. Basically, the functions are of the form:
{FN_ACTION_*}_{DataType.name}_data
For example:
add_tiny_data
add_small_data
remove_small_data
verify_large_data
and so on. Add and remove actions throw a SkipTest if not
implemented, and verify actions by default do nothing.
"""
super(TestHelper, self).__init__()
self._ds_client = None
self._current_host = None
self._expected_override_name = expected_override_name
# For building data access functions
# name/fn pairs for each action
self._data_fns = {self.FN_ACTION_ADD: {},
self.FN_ACTION_REMOVE: {},
self.FN_ACTION_VERIFY: {}}
# Types of data functions to create.
# Pattern used to create the data functions. The first parameter
# is the function type (FN_ACTION_*), the second is the DataType
self.data_fn_pattern = '%s_%s_data'
self._build_data_fns()
def get_client(self, host, *args, **kwargs):
"""Gets the datastore client."""
if not self._ds_client or self._current_host != host:
self._ds_client = self.create_client(host, *args, **kwargs)
self._current_host = host
return self._ds_client
def create_client(self, host, *args, **kwargs):
"""Create a datastore client."""
raise SkipTest('No client defined')
def add_data(self, data_type, host, *args, **kwargs):
"""Adds data of type 'data_type' to the database. Descendant
classes should implement a function for each DataType value
of the form 'add_{DataType.name}_data' - for example:
'add_tiny_data'
'add_small_data'
...
Since this method may be called multiple times, the implemented
'add_*_data' functions should be idempotent.
"""
self._perform_data_action(self.FN_ACTION_ADD, data_type, host,
*args, **kwargs)
def remove_data(self, data_type, host, *args, **kwargs):
"""Removes all data associated with 'data_type'. See
instructions for 'add_data' for implementation guidance.
"""
self._perform_data_action(self.FN_ACTION_REMOVE, data_type, host,
*args, **kwargs)
def verify_data(self, data_type, host, *args, **kwargs):
"""Verify that the data of type 'data_type' exists in the
datastore. This can be done by testing edge cases, and possibly
some random elements within the set. See
instructions for 'add_data' for implementation guidance.
"""
self._perform_data_action(self.FN_ACTION_VERIFY, data_type, host,
*args, **kwargs)
def _perform_data_action(self, action_type, data_type, host,
*args, **kwargs):
fns = self._data_fns[action_type]
data_fn_name = self.data_fn_pattern % (action_type, data_type.name)
try:
fns[data_fn_name](self, host, *args, **kwargs)
except SkipTest:
raise
except Exception as ex:
raise RuntimeError("Error calling %s from class %s - %s" %
(data_fn_name, self.__class__.__name__, ex))
def _build_data_fns(self):
"""Build the base data functions specified by FN_ACTION_*
for each of the types defined in the DataType class. For example,
'add_small_data' and 'verify_large_data'. These
functions can be overwritten by a descendant class and
those overwritten functions will be bound before calling
any data functions such as 'add_data' or 'remove_data'.
"""
for fn_type in self.FN_ACTIONS:
fn_dict = self._data_fns[fn_type]
for data_type in DataType:
self._data_fn_builder(fn_type, data_type, fn_dict)
self._override_data_fns()
def _data_fn_builder(self, fn_type, data_type, fn_dict):
"""Builds the actual function with a SkipTest exception,
and changes the name to reflect the pattern.
"""
name = self.data_fn_pattern % (fn_type, data_type.name)
def data_fn(self, host, *args, **kwargs):
# default action is to skip the test
using_str = ''
if self._expected_override_name != self.__class__.__name__:
using_str = ' (using %s)' % self.__class__.__name__
raise SkipTest("Data function '%s' not found in '%s'%s" %
(name, self._expected_override_name, using_str))
data_fn.__name__ = data_fn.func_name = name
fn_dict[name] = data_fn
def _override_data_fns(self):
"""Bind the override methods to the dict."""
members = inspect.getmembers(self.__class__,
predicate=inspect.ismethod)
for fn_action in self.FN_ACTIONS:
fns = self._data_fns[fn_action]
for name, fn in members:
if name in fns:
fns[name] = fn

View File

View File

@ -0,0 +1,128 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time as timer
from trove.tests.scenario.runners.test_runners import TestRunner
from trove.tests.util.check import TypeCheck
from troveclient.compat import exceptions
class ClusterActionsRunner(TestRunner):
def __init__(self):
super(ClusterActionsRunner, self).__init__()
self.cluster_id = 0
def run_cluster_create(
self, num_nodes=2, expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
instances_def = [
self.build_flavor(
flavor_id=self.instance_info.dbaas_flavor_href,
volume_size=self.instance_info.volume['size'])] * num_nodes
self.cluster_id = self.assert_cluster_create(
'test_cluster', instances_def,
expected_instance_states,
expected_http_code)
def assert_cluster_create(self, cluster_name, instances_def,
expected_instance_states, expected_http_code):
self.report.log("Testing cluster create: %s" % cluster_name)
cluster = self.auth_client.clusters.create(
cluster_name, self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def)
cluster_id = cluster.id
self._assert_cluster_action(cluster_id, 'BUILDING', expected_http_code)
cluster_instances = self._get_cluster_instances(cluster_id)
self.assert_all_instance_states(
cluster_instances, expected_instance_states)
self._assert_cluster_state(cluster_id, 'NONE')
return cluster_id
def run_cluster_delete(
self, expected_last_instance_state='SHUTDOWN',
expected_http_code=202):
self.assert_cluster_delete(
self.cluster_id, expected_last_instance_state, expected_http_code)
def assert_cluster_delete(self, cluster_id, expected_last_instance_state,
expected_http_code):
self.report.log("Testing cluster delete: %s" % cluster_id)
cluster_instances = self._get_cluster_instances(cluster_id)
self.auth_client.clusters.delete(cluster_id)
self._assert_cluster_action(cluster_id, 'DELETING', expected_http_code)
self.assert_all_gone(cluster_instances, expected_last_instance_state)
self._assert_cluster_gone(cluster_id)
def _get_cluster_instances(self, cluster_id):
cluster = self.auth_client.clusters.get(cluster_id)
return [self.auth_client.instances.get(instance['id'])
for instance in cluster.instances]
def _assert_cluster_action(
self, cluster_id, expected_state, expected_http_code):
if expected_http_code is not None:
self.assert_client_code(expected_http_code)
if expected_state:
self._assert_cluster_state(cluster_id, expected_state)
def _assert_cluster_state(self, cluster_id, expected_state):
cluster = self.auth_client.clusters.get(cluster_id)
with TypeCheck('Cluster', cluster) as check:
check.has_field("id", basestring)
check.has_field("name", basestring)
check.has_field("datastore", dict)
check.has_field("instances", list)
check.has_field("links", list)
check.has_field("created", unicode)
check.has_field("updated", unicode)
for instance in cluster.instances:
isinstance(instance, dict)
self.assert_is_not_none(instance['id'])
self.assert_is_not_none(instance['links'])
self.assert_is_not_none(instance['name'])
self.assert_equal(expected_state, cluster.task['name'],
'Unexpected cluster task name')
def _assert_cluster_gone(self, cluster_id):
t0 = timer.time()
try:
self.auth_client.clusters.get(cluster_id)
self.fail(
"Cluster '%s' still existed after %s seconds."
% (cluster_id, self._time_since(t0)))
except exceptions.NotFound:
self.assert_client_code(404)
class MongodbClusterActionsRunner(ClusterActionsRunner):
def run_cluster_create(self, num_nodes=3,
expected_instance_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
super(MongodbClusterActionsRunner, self).run_cluster_create(
num_nodes=num_nodes,
expected_instance_states=expected_instance_states,
expected_http_code=expected_http_code)

View File

@ -0,0 +1,100 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from trove.tests.config import CONFIG
from trove.tests.scenario.runners.test_runners import TestRunner
class InstanceActionsRunner(TestRunner):
def __init__(self):
super(InstanceActionsRunner, self).__init__()
def _get_resize_flavor(self):
if self.EPHEMERAL_SUPPORT:
flavor_name = CONFIG.values.get(
'instance_bigger_eph_flavor_name', 'eph.rd-smaller')
else:
flavor_name = CONFIG.values.get(
'instance_bigger_flavor_name', 'm1.rd-smaller')
return self.get_flavor(flavor_name)
def run_instance_restart(
self, expected_states=['REBOOT', 'ACTIVE'],
expected_http_code=202):
self.assert_instance_restart(self.instance_info.id, expected_states,
expected_http_code)
def assert_instance_restart(self, instance_id, expected_states,
expected_http_code):
self.report.log("Testing restart on instance: %s" % instance_id)
self.auth_client.instances.restart(instance_id)
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
def run_instance_resize_volume(
self, resize_amount=1,
expected_states=['RESIZE', 'ACTIVE'],
expected_http_code=202):
if self.VOLUME_SUPPORT:
self.assert_instance_resize_volume(self.instance_info.id,
resize_amount,
expected_states,
expected_http_code)
else:
raise SkipTest("Volume support is disabled.")
def assert_instance_resize_volume(self, instance_id, resize_amount,
expected_states, expected_http_code):
self.report.log("Testing volume resize by '%d' on instance: %s"
% (resize_amount, instance_id))
instance = self.get_instance(instance_id)
old_volume_size = int(instance.volume['size'])
new_volume_size = old_volume_size + resize_amount
self.auth_client.instances.resize_volume(instance_id, new_volume_size)
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
instance = self.get_instance(instance_id)
self.assert_equal(instance.volume['size'], new_volume_size,
'Unexpected new volume size')
def run_instance_resize_flavor(
self, expected_states=['RESIZE', 'ACTIVE'],
expected_http_code=202):
resize_flavor = self._get_resize_flavor()
self.assert_instance_resize_flavor(self.instance_info.id,
resize_flavor, expected_states,
expected_http_code)
def assert_instance_resize_flavor(self, instance_id, resize_flavor,
expected_states, expected_http_code):
self.report.log("Testing resize to '%s' on instance: %s"
% (resize_flavor, instance_id))
self.auth_client.instances.resize_instance(instance_id,
resize_flavor.id)
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
instance = self.get_instance(instance_id)
self.assert_equal(int(instance.flavor['id']), resize_flavor.id,
'Unexpected resize flavor_id')

View File

@ -0,0 +1,45 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import proboscis
from trove.tests.scenario.runners.test_runners import TestRunner
class InstanceDeleteRunner(TestRunner):
def __init__(self):
super(InstanceDeleteRunner, self).__init__()
def run_instance_delete(
self, expected_states=['SHUTDOWN'],
expected_http_code=202):
if self.has_do_not_delete_instance:
self.report.log("TESTS_DO_NOT_DELETE_INSTANCE=True was "
"specified, skipping delete...")
raise proboscis.SkipTest("TESTS_DO_NOT_DELETE_INSTANCE "
"was specified.")
self.assert_instance_delete(self.instance_info.id, expected_states,
expected_http_code)
def assert_instance_delete(self, instance_id, expected_states,
expected_http_code):
self.report.log("Testing delete on instance: %s" % instance_id)
self.auth_client.instances.delete(instance_id)
self.assert_instance_action(instance_id, expected_states,
expected_http_code)
self.assert_all_gone(instance_id, expected_states[-1])

View File

@ -0,0 +1,74 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions
class NegativeClusterActionsRunner(TestRunner):
def __init__(self):
super(NegativeClusterActionsRunner, self).__init__()
def run_create_constrained_size_cluster(self, min_nodes=2, max_nodes=None,
expected_http_code=400):
self.assert_create_constrained_size_cluster('negative_cluster',
min_nodes, max_nodes,
expected_http_code)
def assert_create_constrained_size_cluster(self, cluster_name,
min_nodes, max_nodes,
expected_http_code):
# Create a cluster with less than 'min_nodes'.
if min_nodes:
instances_def = [self.build_flavor()] * (min_nodes - 1)
self._assert_cluster_create_raises(cluster_name, instances_def,
expected_http_code)
# Create a cluster with mare than 'max_nodes'.
if max_nodes:
instances_def = [self.build_flavor()] * (max_nodes + 1)
self._assert_cluster_create_raises(cluster_name, instances_def,
expected_http_code)
def run_create_heterogeneous_cluster(self, expected_http_code=400):
# Create a cluster with different node flavors.
instances_def = [self.build_flavor(flavor_id=2, volume_size=1),
self.build_flavor(flavor_id=3, volume_size=1)]
self._assert_cluster_create_raises('heterocluster',
instances_def, expected_http_code)
# Create a cluster with different volume sizes.
instances_def = [self.build_flavor(flavor_id=2, volume_size=1),
self.build_flavor(flavor_id=2, volume_size=2)]
self._assert_cluster_create_raises('heterocluster',
instances_def, expected_http_code)
def _assert_cluster_create_raises(self, cluster_name, instances_def,
expected_http_code):
self.assert_raises(exceptions.BadRequest, expected_http_code,
self.auth_client.clusters.create,
cluster_name,
self.instance_info.dbaas_datastore,
self.instance_info.dbaas_datastore_version,
instances=instances_def)
class MongodbNegativeClusterActionsRunner(NegativeClusterActionsRunner):
def run_create_constrained_size_cluster(self):
super(NegativeClusterActionsRunner,
self).run_create_constrained_size_cluster(min_nodes=3,
max_nodes=3)

View File

@ -0,0 +1,216 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from time import sleep
from trove.tests.api.instances import CheckInstance
from trove.tests.scenario.helpers.test_helper import DataType
from trove.tests.scenario.runners.test_runners import TestRunner
from troveclient.compat import exceptions
class ReplicationRunner(TestRunner):
def __init__(self):
super(ReplicationRunner, self).__init__()
self.master_id = self.instance_info.id
self.replica_1_id = 0
self.replica_2_id = 0
self.master_host = self.get_instance_host(self.master_id)
self.replica_1_host = None
self.replica_2_host = None
def run_add_data_for_replication(self):
self.assert_add_data_for_replication(self.master_host)
def assert_add_data_for_replication(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'add_small_data' method.
"""
self.test_helper.add_data(DataType.small, host)
def run_create_replicas(self, expected_states=['BUILD', 'ACTIVE'],
expected_http_code=200):
self.assert_valid_replication_data(self.master_host)
master_id = self.instance_info.id
self.replica_1_id = self.assert_replica_create(
master_id, 'replica1', expected_states,
expected_http_code)
self.replica_2_id = self.assert_replica_create(
master_id, 'replica2', expected_states,
expected_http_code)
self._assert_is_master(master_id,
[self.replica_1_id, self.replica_2_id])
self.replica_1_host = self.get_instance_host(self.replica_1_id)
self.replica_2_host = self.get_instance_host(self.replica_2_id)
self.assert_valid_replication_data(self.replica_1_host)
self.assert_valid_replication_data(self.replica_2_host)
def assert_valid_replication_data(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'verify_small_data' method.
"""
self.test_helper.verify_data(DataType.small, host)
def assert_replica_create(self, master_id, replica_name, expected_states,
expected_http_code):
replica = self.auth_client.instances.create(
self.instance_info.name + replica_name,
self.instance_info.dbaas_flavor_href,
self.instance_info.volume,
slave_of=master_id)
replica_id = replica.id
self.assert_instance_action(replica_id, expected_states,
expected_http_code)
self._assert_is_master(master_id, [replica_id])
self._assert_is_replica(replica_id, master_id)
return replica_id
def run_add_data_to_replicate(self):
self.assert_add_data_to_replicate(self.master_host)
def assert_add_data_to_replicate(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'add_tiny_data' method.
"""
self.test_helper.add_data(DataType.tiny, host)
def run_verify_replicated_data(self):
sleep(30)
self.assert_verify_replicated_data(self.master_host)
self.assert_verify_replicated_data(self.replica_1_host)
self.assert_verify_replicated_data(self.replica_2_host)
def assert_verify_replicated_data(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'add_tiny_data' method.
"""
self.test_helper.verify_data(DataType.tiny, host)
def run_remove_replicated_data(self):
self.assert_remove_replicated_data(self.master_host)
def assert_remove_replicated_data(self, host):
"""In order for this to work, the corresponding datastore
'helper' class should implement the 'remove_tiny_data' method.
"""
self.test_helper.remove_data(DataType.tiny, host)
def run_promote_master(self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.instances.promote_to_replica_source,
self.instance_info.id)
def run_eject_replica(self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.instances.eject_replica_source,
self.replica_1_id)
def run_eject_valid_master(self, expected_exception=exceptions.BadRequest,
expected_http_code=400):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.instances.eject_replica_source,
self.instance_info.id)
def run_delete_valid_master(self, expected_exception=exceptions.Forbidden,
expected_http_code=403):
self.assert_raises(
expected_exception, expected_http_code,
self.auth_client.instances.delete,
self.instance_info.id)
def run_swap_replica_master(
self, expected_states=['PROMOTE', 'ACTIVE'],
expected_http_code=202):
self.assert_swap_replica_master(
self.instance_info.id, self.replica_1_id, expected_states,
expected_http_code)
def assert_swap_replica_master(
self, master_id, replica_id, expected_states, expected_http_code):
other_replica_ids = self._get_replica_set(master_id)
other_replica_ids.remove(replica_id)
# Promote replica
self.assert_replica_promote(self.replica_1_id, expected_states,
expected_http_code)
current_replicas = list(master_id)
current_replicas.extend(other_replica_ids)
self._assert_is_master(replica_id, current_replicas)
self._assert_is_replica(master_id, replica_id)
# Promote the original master
self.assert_replica_promote(self.instance_info.id, expected_states,
expected_http_code)
current_replicas = list(replica_id)
current_replicas.extend(other_replica_ids)
self._assert_is_master(master_id, current_replicas)
self._assert_is_replica(replica_id, master_id)
def assert_replica_promote(self, replica_id, expected_states,
expected_http_code):
replica = self.get_instance(replica_id)
self.auth_client.instances.promote_to_replica_source(replica)
self.assert_instance_action(replica_id, expected_states,
expected_http_code)
def _assert_is_replica(self, instance_id, master_id):
instance = self.get_instance(instance_id)
self.assert_client_code(200)
CheckInstance(instance._info).slave_of()
self.assert_equal(master_id, instance._info['replica_of']['id'],
'Unexpected replication master ID')
def _assert_is_master(self, instance_id, replica_ids):
instance = self.get_instance(instance_id)
self.assert_client_code(200)
CheckInstance(instance._info).slaves()
self.assert_is_sublist(replica_ids, self._get_replica_set(instance_id))
def _get_replica_set(self, master_id):
instance = self.get_instance(master_id)
replica_ids = [replica['id'] for replica in instance._info['replicas']]
self.assert_unique(replica_ids, "Master '%s' has bad replica list"
% master_id)
return replica_ids
def run_delete_replica_set(self, expected_last_instance_state=['SHUTDOWN'],
expected_http_code=202):
self.assert_delete_replica_set(
self.instance_info.id, expected_last_instance_state,
expected_http_code)
def assert_delete_replica_set(self, master_id,
expected_last_instance_state,
expected_http_code):
self.report.log("Deleting a replication set: %s" % master_id)
master = self.get_instance(master_id)
replicas = self._get_replica_set(master_id)
instance_ids = zip([master], replicas)
for instance_id in instance_ids:
self.auth_client.instances.delete(instance_id)
self.assert_client_code(expected_http_code)
self.assert_all_gone(instance_ids, expected_last_instance_state)

View File

@ -0,0 +1,320 @@
# Copyright 2015 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time as timer
from proboscis import asserts
from troveclient.compat import exceptions
from oslo_config.cfg import NoSuchOptError
from trove.common import cfg
from trove.common import utils
from trove.common.utils import poll_until, build_polling_task
from trove.common import exception
from trove.tests.api.instances import instance_info
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
CONF = cfg.CONF
class TestRunner(object):
"""
Base class for all 'Runner' classes.
The Runner classes are those that actually do the work. The 'Group'
classes are set up with decorators that control how the tests flow,
and are used to organized the tests - however they are typically set up
to just call a corresponding method in a Runner class.
A Runner class can be overridden if a particular set of tests
needs to have DataStore specific coding. The corresponding Group
class will try to first load a DataStore specific class, and then fall
back to the generic one if need be. For example,
the NegativeClusterActionsGroup class specifies a runner_base_name of
NegativeClusterActionsRunner. If the manager of the default
datastore is mongodb, then the MongodbNegativeClusterActionsRunner is
used instead. The prefix is created by capitalizing the name of the
manager - overriding classes *must* follow this naming convention
to be automatically used. The main assumption made here is that
if a manager is used for different datastore versions, then the
overriding runner should also be valid for the same datastore versions.
"""
USE_INSTANCE_ID_FLAG = 'TESTS_USE_INSTANCE_ID'
DO_NOT_DELETE_INSTANCE_FLAG = 'TESTS_DO_NOT_DELETE_INSTANCE'
VOLUME_SUPPORT = CONFIG.get('trove_volume_support', True)
EPHEMERAL_SUPPORT = not VOLUME_SUPPORT and CONFIG.get('device_path', None)
ROOT_PARTITION = not (VOLUME_SUPPORT or CONFIG.get('device_path', None))
def __init__(self, sleep_time=60, timeout=1200):
self.def_sleep_time = sleep_time
self.def_timeout = timeout
self.instance_info = instance_info
self.auth_client = create_dbaas_client(self.instance_info.user)
self.unauth_client = None
self.report = CONFIG.get_report()
self._test_helper = None
@classmethod
def fail(cls, message):
asserts.fail(message)
@classmethod
def assert_is_sublist(cls, sub_list, full_list, message=None):
return cls.assert_true(set(sub_list).issubset(full_list), message)
@classmethod
def assert_unique(cls, iterable, message=None):
"""Assert that a given iterable contains only unique elements.
"""
cls.assert_equal(len(iterable), len(set(iterable)), message)
@classmethod
def assert_true(cls, condition, message=None):
asserts.assert_true(condition, message=message)
@classmethod
def assert_false(cls, condition, message=None):
asserts.assert_false(condition, message=message)
@classmethod
def assert_is_none(cls, value, message=None):
asserts.assert_is_none(value, message=message)
@classmethod
def assert_is_not_none(cls, value, message=None):
asserts.assert_is_not_none(value, message=message)
@classmethod
def assert_list_elements_equal(cls, expected, actual, message=None):
"""Assert that two lists contain same elements
(with same multiplicities) ignoring the element order.
"""
return cls.assert_equal(sorted(expected), sorted(actual), message)
@classmethod
def assert_equal(cls, expected, actual, message=None):
if not message:
message = 'Unexpected value'
try:
message += ": '%s' (expected '%s')." % (actual, expected)
except TypeError:
pass
asserts.assert_equal(expected, actual, message=message)
@classmethod
def assert_not_equal(cls, expected, actual, message=None):
if not message:
message = 'Expected different value than'
try:
message += ": '%s'." % expected
except TypeError:
pass
asserts.assert_not_equal(expected, actual, message=message)
@property
def test_helper(self):
return self._test_helper
@test_helper.setter
def test_helper(self, test_helper):
self._test_helper = test_helper
def get_unauth_client(self):
if not self.unauth_client:
self.unauth_client = self._create_unauthorized_client()
return self.unauth_client
def _create_unauthorized_client(self, force=False):
"""Create a client from a different 'unauthorized' user
to facilitate negative testing.
"""
requirements = Requirements(is_admin=False)
other_user = CONFIG.users.find_user(
requirements, black_list=[self.instance_info.user.auth_user])
return create_dbaas_client(other_user)
def assert_raises(self, expected_exception, expected_http_code,
client_cmd, *cmd_args, **cmd_kwargs):
asserts.assert_raises(expected_exception, client_cmd,
*cmd_args, **cmd_kwargs)
self.assert_client_code(expected_http_code)
def get_datastore_config_property(self, name, datastore=None):
"""Get a Trove configuration property for a given datastore.
Use the current instance's datastore if None.
"""
try:
return CONF.get(
datastore or self.instance_info.dbaas_datastore).get(name)
except NoSuchOptError:
return CONF.get(name)
@property
def is_using_existing_instance(self):
return os.environ.get(self.USE_INSTANCE_ID_FLAG, None) is not None
def get_existing_instance(self):
if self.is_using_existing_instance:
instance_id = os.environ.get(self.USE_INSTANCE_ID_FLAG)
return self._get_instance_info(instance_id)
return None
@property
def has_do_not_delete_instance(self):
return os.environ.get(
self.DO_NOT_DELETE_INSTANCE_FLAG, None) is not None
def assert_instance_action(
self, instance_ids, expected_states, expected_http_code):
self.assert_client_code(expected_http_code)
if expected_states:
self.assert_all_instance_states(
instance_ids if utils.is_collection(instance_ids)
else [instance_ids], expected_states)
def assert_client_code(self, expected_http_code, client=None):
if expected_http_code is not None:
client = client or self.auth_client
self.assert_equal(expected_http_code, client.last_http_code,
"Unexpected client status code")
def assert_all_instance_states(self, instance_ids, expected_states):
tasks = [build_polling_task(
lambda: self._assert_instance_states(instance_id, expected_states),
sleep_time=self.def_sleep_time, time_out=self.def_timeout)
for instance_id in instance_ids]
poll_until(lambda: all(poll_task.ready() for poll_task in tasks),
sleep_time=self.def_sleep_time, time_out=self.def_timeout)
for task in tasks:
if task.has_result():
self.assert_true(
task.poll_result(),
"Some instances failed to acquire all expected states.")
elif task.has_exception():
self.fail(str(task.poll_exception()))
def _assert_instance_states(self, instance_id, expected_states,
fast_fail_status='ERROR'):
for status in expected_states:
start_time = timer.time()
try:
poll_until(lambda: self._has_status(
instance_id, status, fast_fail_status=fast_fail_status),
sleep_time=self.def_sleep_time,
time_out=self.def_timeout)
self.report.log("Instance has gone '%s' in %s." %
(status, self._time_since(start_time)))
except exception.PollTimeOut:
self.report.log(
"Status of instance '%s' did not change to '%s' after %s."
% (instance_id, status, self._time_since(start_time)))
return False
return True
def _time_since(self, start_time):
return '%.1fs' % (timer.time() - start_time)
def assert_all_gone(self, instance_ids, expected_last_status):
self._wait_all_deleted(instance_ids
if utils.is_collection(instance_ids)
else [instance_ids], expected_last_status)
def assert_pagination_match(
self, list_page, full_list, start_idx, end_idx):
self.assert_equal(full_list[start_idx:end_idx], list(list_page),
"List page does not match the expected full "
"list section.")
def _wait_all_deleted(self, instance_ids, expected_last_status):
tasks = [build_polling_task(
lambda: self._wait_for_delete(instance_id, expected_last_status),
sleep_time=self.def_sleep_time, time_out=self.def_timeout)
for instance_id in instance_ids]
poll_until(lambda: all(poll_task.ready() for poll_task in tasks),
sleep_time=self.def_sleep_time, time_out=self.def_timeout)
for task in tasks:
if task.has_result():
self.assert_true(
task.poll_result(),
"Some instances were not removed.")
elif task.has_exception():
self.fail(str(task.poll_exception()))
def _wait_for_delete(self, instance_id, expected_last_status):
start_time = timer.time()
try:
self._poll_while(instance_id, expected_last_status,
sleep_time=self.def_sleep_time,
time_out=self.def_timeout)
except exceptions.NotFound:
self.assert_client_code(404)
self.report.log("Instance was removed in %s." %
self._time_since(start_time))
return True
except exception.PollTimeOut:
self.report.log(
"Instance '%s' still existed after %s."
% (instance_id, self._time_since(start_time)))
return False
def _poll_while(self, instance_id, expected_status,
sleep_time=1, time_out=None):
poll_until(lambda: not self._has_status(instance_id, expected_status),
sleep_time=sleep_time, time_out=time_out)
def _has_status(self, instance_id, status, fast_fail_status=None):
instance = self.get_instance(instance_id)
self.report.log("Waiting for instance '%s' to become '%s': %s"
% (instance_id, status, instance.status))
if fast_fail_status and instance.status == fast_fail_status:
raise RuntimeError("Instance '%s' acquired a fast-fail status: %s"
% (instance_id, status))
return instance.status == status
def get_instance(self, instance_id):
return self.auth_client.instances.get(instance_id)
def get_instance_host(self, instance_id=None):
instance_id = instance_id or self.instance_info.id
instance = self.get_instance(instance_id)
host = str(instance._info['ip'][0])
self.report.log("Found host %s for instance %s." % (host, instance_id))
return host
def build_flavor(self, flavor_id=2, volume_size=1):
return {"flavorRef": flavor_id, "volume": {"size": volume_size}}
def get_flavor(self, flavor_name):
flavors = self.auth_client.find_flavors_by_name(flavor_name)
self.assert_equal(
1, len(flavors),
"Unexpected number of flavors with name '%s' found." % flavor_name)
flavor = flavors[0]
self.assert_is_not_none(flavor, "Flavor '%s' not found." % flavor_name)
return flavor