Merge "Add configuration support for clusters"

commit b5cf0c58f2
@@ -0,0 +1,3 @@
features:
  - Support attaching and detaching of configuration
    groups on clusters.
@@ -492,13 +492,13 @@
    [
        "trove/configuration/service.py",
        "E1101",
        "Instance of 'BuiltInstance' has no 'update_overrides' member",
        "Instance of 'BuiltInstance' has no 'update_configuration' member",
        "ConfigurationsController._refresh_on_all_instances"
    ],
    [
        "trove/configuration/service.py",
        "no-member",
        "Instance of 'BuiltInstance' has no 'update_overrides' member",
        "Instance of 'BuiltInstance' has no 'update_configuration' member",
        "ConfigurationsController._refresh_on_all_instances"
    ],
    [
@@ -753,6 +753,18 @@
        "Instance of 'Table' has no 'create_column' member",
        "upgrade"
    ],
    [
        "trove/db/sqlalchemy/migrate_repo/versions/042_add_cluster_configuration_id.py",
        "E1101",
        "Instance of 'Table' has no 'create_column' member",
        "upgrade"
    ],
    [
        "trove/db/sqlalchemy/migrate_repo/versions/042_add_cluster_configuration_id.py",
        "no-member",
        "Instance of 'Table' has no 'create_column' member",
        "upgrade"
    ],
    [
        "trove/db/sqlalchemy/migration.py",
        "E0611",
@@ -1299,18 +1311,6 @@
        "Instance of 'BuiltInstance' has no 'restart' member",
        "Manager.restart"
    ],
    [
        "trove/taskmanager/manager.py",
        "E1101",
        "Instance of 'BuiltInstance' has no 'unassign_configuration' member",
        "Manager.unassign_configuration"
    ],
    [
        "trove/taskmanager/manager.py",
        "E1101",
        "Instance of 'BuiltInstance' has no 'update_overrides' member",
        "Manager.update_overrides"
    ],
    [
        "trove/taskmanager/manager.py",
        "E1101",
@@ -1383,18 +1383,6 @@
        "Instance of 'BuiltInstance' has no 'restart' member",
        "Manager.restart"
    ],
    [
        "trove/taskmanager/manager.py",
        "no-member",
        "Instance of 'BuiltInstance' has no 'unassign_configuration' member",
        "Manager.unassign_configuration"
    ],
    [
        "trove/taskmanager/manager.py",
        "no-member",
        "Instance of 'BuiltInstance' has no 'update_overrides' member",
        "Manager.update_overrides"
    ],
    [
        "trove/taskmanager/manager.py",
        "no-member",
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.

import six

from oslo_log import log as logging

from novaclient import exceptions as nova_exceptions
@@ -21,18 +23,27 @@ from trove.cluster.tasks import ClusterTasks
from trove.common import cfg
from trove.common import exception
from trove.common.i18n import _
from trove.common.notification import (DBaaSClusterGrow, DBaaSClusterShrink,
                                       DBaaSClusterResetStatus,
                                       DBaaSClusterRestart)
from trove.common.notification import (
    DBaaSClusterAttachConfiguration,
    DBaaSClusterDetachConfiguration,
    DBaaSClusterGrow,
    DBaaSClusterShrink,
    DBaaSClusterResetStatus,
    DBaaSClusterRestart)
from trove.common.notification import DBaaSClusterUpgrade
from trove.common.notification import DBaaSInstanceAttachConfiguration
from trove.common.notification import DBaaSInstanceDetachConfiguration
from trove.common.notification import EndNotification
from trove.common.notification import StartNotification
from trove.common import remote
from trove.common import server_group as srv_grp
from trove.common.strategies.cluster import strategy
from trove.common import utils
from trove.configuration import models as config_models
from trove.datastore import models as datastore_models
from trove.db import models as dbmodels
from trove.instance import models as inst_models
from trove.instance.tasks import InstanceTasks
from trove.taskmanager import api as task_api
@@ -49,7 +60,7 @@ def persisted_models():
class DBCluster(dbmodels.DatabaseModelBase):
    _data_fields = ['id', 'created', 'updated', 'name', 'task_id',
                    'tenant_id', 'datastore_version_id', 'deleted',
                    'deleted_at']
                    'deleted_at', 'configuration_id']

    def __init__(self, task_status, **kwargs):
        """
@@ -140,7 +151,6 @@ class Cluster(object):
        self.update_db(task_status=ClusterTasks.NONE)

    def reset_status(self):
        self.validate_cluster_available([ClusterTasks.BUILDING_INITIAL])
        LOG.info(_("Resetting status to NONE on cluster %s") % self.id)
        self.reset_task()
        instances = inst_models.DBInstance.find_all(cluster_id=self.id,
@@ -197,6 +207,10 @@ class Cluster(object):
    def deleted_at(self):
        return self.db_info.deleted_at

    @property
    def configuration_id(self):
        return self.db_info.configuration_id

    @property
    def db_instances(self):
        """DBInstance objects are persistent, therefore cacheable."""
@@ -246,14 +260,14 @@ class Cluster(object):

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
               instances, extended_properties, locality, configuration):
        locality = srv_grp.ServerGroup.build_scheduler_hint(
            context, locality, name)
        api_strategy = strategy.load_api_strategy(datastore_version.manager)
        return api_strategy.cluster_class.create(context, name, datastore,
                                                 datastore_version, instances,
                                                 extended_properties,
                                                 locality)
                                                 locality, configuration)

    def validate_cluster_available(self, valid_states=[ClusterTasks.NONE]):
        if self.db_info.task_status not in valid_states:
@@ -304,6 +318,11 @@ class Cluster(object):
                if 'availability_zone' in node:
                    instance['availability_zone'] = (
                        node['availability_zone'])
                if 'type' in node:
                    instance_type = node['type']
                    if isinstance(instance_type, six.string_types):
                        instance_type = instance_type.split(',')
                    instance['instance_type'] = instance_type
                instances.append(instance)
            return self.grow(instances)
        elif action == 'shrink':
@@ -328,7 +347,23 @@ class Cluster(object):
            dv = datastore_models.DatastoreVersion.load(self.datastore, dv_id)
            with StartNotification(context, cluster_id=self.id,
                                   datastore_version=dv.id):
                return self.upgrade(dv)
                self.upgrade(dv)
                self.update_db(datastore_version_id=dv.id)

        elif action == 'configuration_attach':
            configuration_id = param['configuration_id']
            context.notification = DBaaSClusterAttachConfiguration(context,
                                                                   request=req)
            with StartNotification(context, cluster_id=self.id,
                                   configuration_id=configuration_id):
                return self.configuration_attach(configuration_id)

        elif action == 'configuration_detach':
            context.notification = DBaaSClusterDetachConfiguration(context,
                                                                   request=req)
            with StartNotification(context, cluster_id=self.id):
                return self.configuration_detach()

        else:
            raise exception.BadRequest(_("Action %s not supported") % action)

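For orientation, the two new branches above are driven by the cluster "action" request body. A hedged illustration of the payloads that would reach them (the exact field layout is inferred from the param['configuration_id'] lookup above and is an assumption, not part of this change):

    # Illustrative request bodies only.
    attach_body = {"configuration_attach": {"configuration_id": "<config-group-uuid>"}}
    detach_body = {"configuration_detach": {}}
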
@@ -376,6 +411,128 @@ class Cluster(object):
    def upgrade(self, datastore_version):
        raise exception.BadRequest(_("Action 'upgrade' not supported"))

    def configuration_attach(self, configuration_id):
        raise exception.BadRequest(
            _("Action 'configuration_attach' not supported"))

    def rolling_configuration_update(self, configuration_id,
                                     apply_on_all=True):
        cluster_notification = self.context.notification
        request_info = cluster_notification.serialize(self.context)
        self.validate_cluster_available()
        self.db_info.update(task_status=ClusterTasks.UPDATING_CLUSTER)
        try:
            configuration = config_models.Configuration.find(
                self.context, configuration_id, self.datastore_version.id)
            instances = [inst_models.Instance.load(self.context, instance.id)
                         for instance in self.instances]

            LOG.debug("Persisting changes on cluster nodes.")
            # Allow re-applying the same configuration (e.g. on configuration
            # updates).
            for instance in instances:
                if not (instance.configuration and
                        instance.configuration.id != configuration_id):
                    self.context.notification = (
                        DBaaSInstanceAttachConfiguration(self.context,
                                                         **request_info))
                    with StartNotification(self.context,
                                           instance_id=instance.id,
                                           configuration_id=configuration_id):
                        with EndNotification(self.context):
                            instance.save_configuration(configuration)
                else:
                    LOG.debug(
                        "Node '%s' already has the configuration '%s' "
                        "attached." % (instance.id, configuration_id))

            # Configuration has been persisted to all instances.
            # The cluster is in a consistent state with all nodes
            # requiring restart.
            # We therefore assign the configuration group ID now.
            # The configuration can be safely detached at this point.
            self.update_db(configuration_id=configuration_id)

            LOG.debug("Applying runtime configuration changes.")
            if instances[0].apply_configuration(configuration):
                LOG.debug(
                    "Runtime changes have been applied successfully to the "
                    "first node.")
                remaining_nodes = instances[1:]
                if apply_on_all:
                    LOG.debug(
                        "Applying the changes to the remaining nodes.")
                    for instance in remaining_nodes:
                        instance.apply_configuration(configuration)
                else:
                    LOG.debug(
                        "Releasing restart-required task on the remaining "
                        "nodes.")
                    for instance in remaining_nodes:
                        instance.update_db(task_status=InstanceTasks.NONE)
        finally:
            self.update_db(task_status=ClusterTasks.NONE)

        return self.__class__(self.context, self.db_info,
                              self.ds, self.ds_version)

    def configuration_detach(self):
        raise exception.BadRequest(
            _("Action 'configuration_detach' not supported"))

    def rolling_configuration_remove(self, apply_on_all=True):
        cluster_notification = self.context.notification
        request_info = cluster_notification.serialize(self.context)
        self.validate_cluster_available()
        self.db_info.update(task_status=ClusterTasks.UPDATING_CLUSTER)
        try:
            instances = [inst_models.Instance.load(self.context, instance.id)
                         for instance in self.instances]

            LOG.debug("Removing changes from cluster nodes.")
            for instance in instances:
                if instance.configuration:
                    self.context.notification = (
                        DBaaSInstanceDetachConfiguration(self.context,
                                                         **request_info))
                    with StartNotification(self.context,
                                           instance_id=instance.id):
                        with EndNotification(self.context):
                            instance.delete_configuration()
                else:
                    LOG.debug(
                        "Node '%s' has no configuration attached."
                        % instance.id)

            # The cluster is in a consistent state with all nodes
            # requiring restart.
            # New configuration can be safely attached at this point.
            configuration_id = self.configuration_id
            self.update_db(configuration_id=None)

            LOG.debug("Applying runtime configuration changes.")
            if instances[0].reset_configuration(configuration_id):
                LOG.debug(
                    "Runtime changes have been applied successfully to the "
                    "first node.")
                remaining_nodes = instances[1:]
                if apply_on_all:
                    LOG.debug(
                        "Applying the changes to the remaining nodes.")
                    for instance in remaining_nodes:
                        instance.reset_configuration(configuration_id)
                else:
                    LOG.debug(
                        "Releasing restart-required task on the remaining "
                        "nodes.")
                    for instance in remaining_nodes:
                        instance.update_db(task_status=InstanceTasks.NONE)
        finally:
            self.update_db(task_status=ClusterTasks.NONE)

        return self.__class__(self.context, self.db_info,
                              self.ds, self.ds_version)

    @staticmethod
    def load_instance(context, cluster_id, instance_id):
        return inst_models.load_instance_with_info(
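The two rolling helpers above are opt-in: a datastore strategy exposes them by overriding configuration_attach()/configuration_detach(), exactly as the Cassandra and Galera hunks later in this diff do. A minimal sketch of such an override (the class name is hypothetical):

    class MyDatastoreCluster(Cluster):

        def configuration_attach(self, configuration_id):
            # Persist on every node, apply runtime values on the first node,
            # and release the RESTART_REQUIRED task on the remaining nodes.
            self.rolling_configuration_update(configuration_id,
                                              apply_on_all=False)

        def configuration_detach(self):
            self.rolling_configuration_remove(apply_on_all=False)
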
@@ -218,6 +218,8 @@ class ClusterController(wsgi.Controller):
        if locality not in locality_domain:
            raise exception.BadRequest(msg=locality_domain_msg)

        configuration = body['cluster'].get('configuration')

        context.notification = notification.DBaaSClusterCreate(context,
                                                               request=req)
        with StartNotification(context, name=name, datastore=datastore.name,
@@ -225,7 +227,7 @@ class ClusterController(wsgi.Controller):
            cluster = models.Cluster.create(context, name, datastore,
                                            datastore_version, instances,
                                            extended_properties,
                                            locality)
                                            locality, configuration)
        cluster.locality = locality
        view = views.load_view(cluster, req=req, load_servers=False)
        return wsgi.Result(view.data(), 200)
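With the controller change above, a cluster-create request can now carry a configuration group ID next to the existing fields. A hedged example of such a body (all values are placeholders and the instance fields are assumptions; only the top-level "configuration" key is what this hunk adds):

    body = {"cluster": {
        "name": "products",
        "instances": [
            {"flavorRef": "7", "volume": {"size": 1}},
            {"flavorRef": "7", "volume": {"size": 1}},
            {"flavorRef": "7", "volume": {"size": 1}},
        ],
        "configuration": "<config-group-uuid>",
    }}
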
@@ -73,6 +73,8 @@ class ClusterTasks(object):
        0x07, 'UPGRADING_CLUSTER', 'Upgrading the cluster to new version.')
    RESTARTING_CLUSTER = ClusterTask(
        0x08, 'RESTARTING_CLUSTER', 'Restarting the cluster.')
    UPDATING_CLUSTER = ClusterTask(
        0x09, 'UPDATING_CLUSTER', 'Updating cluster configuration.')


# Dissuade further additions at run-time.
@@ -55,6 +55,8 @@ class ClusterView(object):

        if self.cluster.locality:
            cluster_dict['locality'] = self.cluster.locality
        if self.cluster.configuration_id:
            cluster_dict['configuration'] = self.cluster.configuration_id
        LOG.debug(cluster_dict)
        return {"cluster": cluster_dict}

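As a result, the cluster document returned by the API now exposes the attached group. An illustrative fragment of the response (keys other than "configuration" were already present; values are placeholders):

    {"cluster": {"id": "<cluster-uuid>",
                 "locality": "affinity",
                 "configuration": "<config-group-uuid>"}}
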
@@ -236,6 +236,11 @@ class UnprocessableEntity(TroveError):
    message = _("Unable to process the contained request.")


class ConfigurationNotSupported(UnprocessableEntity):

    message = _("Configuration groups not supported by the datastore.")


class CannotResizeToSameSize(TroveError):

    message = _("No change was requested in the size of the instance.")
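Cluster strategies that cannot honour a configuration group reject it up front with this new exception, as the MongoDB, Redis and Vertica hunks later in this diff do. A minimal sketch of that guard:

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality, configuration):
        if configuration:
            raise exception.ConfigurationNotSupported()
        # ... proceed with the regular cluster creation path ...
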
@@ -549,6 +549,28 @@ class DBaaSInstanceDetachConfiguration(DBaaSAPINotification):
        return ['instance_id']


class DBaaSClusterAttachConfiguration(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_attach_configuration'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['cluster_id', 'configuration_id']


class DBaaSClusterDetachConfiguration(DBaaSAPINotification):

    @abc.abstractmethod
    def event_type(self):
        return 'cluster_detach_configuration'

    @abc.abstractmethod
    def required_start_traits(self):
        return ['cluster_id']


class DBaaSClusterCreate(DBaaSAPINotification):

    @abc.abstractmethod
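Both classes follow the existing DBaaSAPINotification pattern: the caller assigns an instance to context.notification and wraps the operation in StartNotification/EndNotification, which emit events carrying the declared traits. A condensed sketch mirroring the usage added to trove/cluster/models.py above:

    context.notification = DBaaSClusterAttachConfiguration(context,
                                                           request=req)
    with StartNotification(context, cluster_id=cluster.id,
                           configuration_id=configuration_id):
        cluster.configuration_attach(configuration_id)
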
@@ -82,19 +82,20 @@ class CassandraCluster(models.Cluster):

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
               instances, extended_properties, locality, configuration):
        LOG.debug("Processing a request for creating a new cluster.")

        # Updating Cluster Task.
        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)
            task_status=ClusterTasks.BUILDING_INITIAL,
            configuration_id=configuration)

        cls._create_cluster_instances(
            context, db_info.id, db_info.name,
            datastore, datastore_version, instances, extended_properties,
            locality)
            locality, configuration)

        # Calling taskmanager to further proceed for cluster-configuration.
        task_api.load(context, datastore_version.manager).create_cluster(
@@ -106,7 +107,7 @@ class CassandraCluster(models.Cluster):
    def _create_cluster_instances(
            cls, context, cluster_id, cluster_name,
            datastore, datastore_version, instances, extended_properties,
            locality):
            locality, configuration_id):
        LOG.debug("Processing a request for new cluster instances.")

        cassandra_conf = CONF.get(datastore_version.manager)
@@ -153,7 +154,7 @@ class CassandraCluster(models.Cluster):
                instance['volume_size'], None,
                nics=instance.get('nics', None),
                availability_zone=instance_az,
                configuration_id=None,
                configuration_id=configuration_id,
                cluster_config=member_config,
                modules=instance.get('modules'),
                locality=locality,
@@ -180,9 +181,11 @@ class CassandraCluster(models.Cluster):
        db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)

        locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
        configuration_id = self.db_info.configuration_id

        new_instances = self._create_cluster_instances(
            context, db_info.id, db_info.name, datastore, datastore_version,
            instances, None, locality)
            instances, None, locality, configuration_id)

        task_api.load(context, datastore_version.manager).grow_cluster(
            db_info.id, [instance.id for instance in new_instances])
@@ -212,6 +215,12 @@ class CassandraCluster(models.Cluster):
    def upgrade(self, datastore_version):
        self.rolling_upgrade(datastore_version)

    def configuration_attach(self, configuration_id):
        self.rolling_configuration_update(configuration_id, apply_on_all=False)

    def configuration_detach(self):
        self.rolling_configuration_remove(apply_on_all=False)


class CassandraClusterView(ClusterView):

@@ -97,7 +97,8 @@ class GaleraCommonCluster(cluster_models.Cluster):

    @staticmethod
    def _create_instances(context, db_info, datastore, datastore_version,
                          instances, extended_properties, locality):
                          instances, extended_properties, locality,
                          configuration_id):
        member_config = {"id": db_info.id,
                         "instance_type": "member"}
        name_index = 1
@@ -118,7 +119,7 @@ class GaleraCommonCluster(cluster_models.Cluster):
                availability_zone=instance.get(
                    'availability_zone', None),
                nics=instance.get('nics', None),
                configuration_id=None,
                configuration_id=configuration_id,
                cluster_config=member_config,
                modules=instance.get('modules'),
                locality=locality,
@@ -128,7 +129,7 @@ class GaleraCommonCluster(cluster_models.Cluster):

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
               instances, extended_properties, locality, configuration):
        LOG.debug("Initiating Galera cluster creation.")
        cls._validate_cluster_instances(context, instances, datastore,
                                        datastore_version)
@@ -136,10 +137,12 @@ class GaleraCommonCluster(cluster_models.Cluster):
        db_info = cluster_models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)
            task_status=ClusterTasks.BUILDING_INITIAL,
            configuration_id=configuration)

        cls._create_instances(context, db_info, datastore, datastore_version,
                              instances, extended_properties, locality)
                              instances, extended_properties, locality,
                              configuration)

        # Calling taskmanager to further proceed for cluster-configuration
        task_api.load(context, datastore_version.manager).create_cluster(
@@ -160,9 +163,10 @@ class GaleraCommonCluster(cluster_models.Cluster):
        db_info.update(task_status=ClusterTasks.GROWING_CLUSTER)
        try:
            locality = srv_grp.ServerGroup.convert_to_hint(self.server_group)
            configuration_id = self.db_info.configuration_id
            new_instances = self._create_instances(
                context, db_info, datastore, datastore_version, instances,
                None, locality)
                None, locality, configuration_id)

            task_api.load(context, datastore_version.manager).grow_cluster(
                db_info.id, [instance.id for instance in new_instances])
@@ -203,6 +207,12 @@ class GaleraCommonCluster(cluster_models.Cluster):
    def upgrade(self, datastore_version):
        self.rolling_upgrade(datastore_version)

    def configuration_attach(self, configuration_id):
        self.rolling_configuration_update(configuration_id)

    def configuration_detach(self):
        self.rolling_configuration_remove()


class GaleraCommonClusterView(ClusterView):

@@ -58,7 +58,10 @@ class MongoDbCluster(models.Cluster):

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
               instances, extended_properties, locality, configuration):

        if configuration:
            raise exception.ConfigurationNotSupported()

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
@@ -97,9 +97,12 @@ class RedisCluster(models.Cluster):

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
               instances, extended_properties, locality, configuration):
        LOG.debug("Initiating cluster creation.")

        if configuration:
            raise exception.ConfigurationNotSupported()

        # Updating Cluster Task

        db_info = models.DBCluster.create(
@@ -129,9 +129,12 @@ class VerticaCluster(models.Cluster):

    @classmethod
    def create(cls, context, name, datastore, datastore_version,
               instances, extended_properties, locality):
               instances, extended_properties, locality, configuration):
        LOG.debug("Initiating cluster creation.")

        if configuration:
            raise exception.ConfigurationNotSupported()

        vertica_conf = CONF.get(datastore_version.manager)
        num_instances = len(instances)

@@ -209,6 +209,21 @@ class Configuration(object):
                item["deleted_at"] = None
                DBConfigurationParameter.save(item)

    @staticmethod
    def find(context, configuration_id, datastore_version_id):
        try:
            info = Configuration.load(context, configuration_id)
            if (info.datastore_version_id == datastore_version_id):
                return Configuration(context, configuration_id)
        except exception.ModelNotFoundError:
            raise exception.NotFound(
                message='Configuration group id: %s could not be found.'
                % configuration_id)

        raise exception.ConfigurationDatastoreNotMatchInstance(
            config_datastore_version=info.datastore_version_id,
            instance_datastore_version=datastore_version_id)


class DBConfiguration(dbmodels.DatabaseModelBase):
    _data_fields = ['name', 'description', 'tenant_id', 'datastore_version_id',
@@ -256,6 +271,7 @@ class DBDatastoreConfigurationParameters(dbmodels.DatabaseModelBase):


class DatastoreConfigurationParameters(object):

    def __init__(self, db_info):
        self.db_info = db_info

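Configuration.find() either returns a Configuration scoped to the caller's context or raises: NotFound when the group does not exist, and ConfigurationDatastoreNotMatchInstance when it belongs to a different datastore version. A hedged usage sketch mirroring the call made from Cluster.rolling_configuration_update():

    try:
        configuration = config_models.Configuration.find(
            context, configuration_id, datastore_version.id)
    except exception.NotFound:
        pass  # unknown configuration group id
    except exception.ConfigurationDatastoreNotMatchInstance:
        pass  # group was created for another datastore version
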
@@ -18,6 +18,7 @@ from datetime import datetime
from oslo_log import log as logging
import six

from trove.cluster import models as cluster_models
import trove.common.apischema as apischema
from trove.common import cfg
from trove.common import exception
@@ -198,6 +199,8 @@ class ConfigurationsController(wsgi.Controller):
                                                      deleted_at)
        models.Configuration.save(group, items)
        self._refresh_on_all_instances(context, id)
        self._refresh_on_all_clusters(context, id)

        return wsgi.Result(None, 202)

    def edit(self, req, body, tenant_id, id):
@@ -211,25 +214,41 @@ class ConfigurationsController(wsgi.Controller):
                                                  body['configuration'])
        models.Configuration.save(group, items)
        self._refresh_on_all_instances(context, id)
        self._refresh_on_all_clusters(context, id)

    def _refresh_on_all_instances(self, context, configuration_id):
        """Refresh a configuration group on all its instances.
        """Refresh a configuration group on all single instances.
        """
        dbinstances = instances_models.DBInstance.find_all(
        LOG.debug("Re-applying configuration group '%s' to all instances."
                  % configuration_id)
        single_instances = instances_models.DBInstance.find_all(
            tenant_id=context.tenant,
            configuration_id=configuration_id,
            cluster_id=None,
            deleted=False).all()

        config = models.Configuration(context, configuration_id)
        for dbinstance in single_instances:
            LOG.debug("Re-applying configuration to instance: %s"
                      % dbinstance.id)
            instance = instances_models.Instance.load(context, dbinstance.id)
            instance.update_configuration(config)

    def _refresh_on_all_clusters(self, context, configuration_id):
        """Refresh a configuration group on all clusters.
        """
        LOG.debug("Re-applying configuration group '%s' to all clusters."
                  % configuration_id)
        clusters = cluster_models.DBCluster.find_all(
            tenant_id=context.tenant,
            configuration_id=configuration_id,
            deleted=False).all()

        LOG.debug(
            "All instances with configuration group '%s' on tenant '%s': %s"
            % (configuration_id, context.tenant, dbinstances))

        config = models.Configuration(context, configuration_id)
        for dbinstance in dbinstances:
            LOG.debug("Applying configuration group '%s' to instance: %s"
                      % (configuration_id, dbinstance.id))
            instance = instances_models.Instance.load(context, dbinstance.id)
            instance.update_overrides(config)
        for dbcluster in clusters:
            LOG.debug("Re-applying configuration to cluster: %s"
                      % dbcluster.id)
            cluster = cluster_models.Cluster.load(context, dbcluster.id)
            cluster.configuration_attach(configuration_id)

    def _configuration_items_list(self, group, configuration):
        ds_version_id = group.datastore_version_id
@@ -0,0 +1,38 @@
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.common import cfg
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table


CONF = cfg.CONF
logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')

meta = MetaData()


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    # Load 'configurations' table to MetaData.
    Table('configurations', meta, autoload=True, autoload_with=migrate_engine)
    instances = Table('clusters', meta, autoload=True)
    instances.create_column(Column('configuration_id', String(36),
                                   ForeignKey("configurations.id")))
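After this migration the new column is surfaced through the model changes earlier in this diff: DBCluster gains 'configuration_id' in _data_fields and Cluster.configuration_id reads it back. A small hedged sketch of that round trip (argument values are placeholders):

    db_info = DBCluster.create(
        name='products', tenant_id=context.tenant,
        datastore_version_id=datastore_version.id,
        task_status=ClusterTasks.BUILDING_INITIAL,
        configuration_id=configuration_id)
    cluster = Cluster(context, db_info, datastore, datastore_version)
    cluster.configuration_id  # returns db_info.configuration_id
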
@@ -1282,11 +1282,6 @@ class Instance(BuiltInstance):
        Raises exception if a configuration assign cannot
        currently be performed
        """
        # check if the instance already has a configuration assigned
        if self.db_info.configuration_id:
            raise exception.ConfigurationAlreadyAttached(
                instance_id=self.id,
                configuration_id=self.db_info.configuration_id)

        # check if the instance is not ACTIVE or has tasks
        status = None
@@ -1299,23 +1294,118 @@ class Instance(BuiltInstance):
            raise exception.InvalidInstanceState(instance_id=self.id,
                                                 status=status)

    def unassign_configuration(self):
        LOG.debug("Unassigning the configuration from the instance %s.",
                  self.id)
    def attach_configuration(self, configuration_id):
        LOG.debug("Attaching configuration to instance: %s", self.id)
        if not self.db_info.configuration_id:
            self._validate_can_perform_assign()
            LOG.debug("Attaching configuration: %s", configuration_id)
            config = Configuration.find(self.context, configuration_id,
                                        self.db_info.datastore_version_id)
            self.update_configuration(config)
        else:
            raise exception.ConfigurationAlreadyAttached(
                instance_id=self.id,
                configuration_id=self.db_info.configuration_id)

    def update_configuration(self, configuration):
        self.save_configuration(configuration)
        return self.apply_configuration(configuration)

    def save_configuration(self, configuration):
        """Save configuration changes on the guest.
        Update Trove records if successful.
        This method does not update runtime values. It sets the instance task
        to RESTART_REQUIRED.
        """

        LOG.debug("Saving configuration on instance: %s", self.id)
        overrides = configuration.get_configuration_overrides()

        # Always put the instance into RESTART_REQUIRED state after a
        # configuration update. The state may be released only once (and if)
        # the configuration is successfully applied.
        # This ensures that the instance will always be in a consistent state
        # even if the apply never executes or fails.
        LOG.debug("Persisting new configuration on the guest.")
        self.guest.update_overrides(overrides)
        LOG.debug("Configuration has been persisted on the guest.")

        # Configuration has now been persisted on the instance and can be
        # safely detached. Update our records to reflect this change
        # irrespective of results of any further operations.
        self.update_db(task_status=InstanceTasks.RESTART_REQUIRED,
                       configuration_id=configuration.configuration_id)

    def apply_configuration(self, configuration):
        """Apply runtime configuration changes and release the
        RESTART_REQUIRED task.
        Apply changes only if ALL values can be applied at once.
        Return True if the configuration has changed.
        """

        LOG.debug("Applying configuration on instance: %s", self.id)
        overrides = configuration.get_configuration_overrides()

        if not configuration.does_configuration_need_restart():
            LOG.debug("Applying runtime configuration changes.")
            self.guest.apply_overrides(overrides)
            LOG.debug("Configuration has been applied.")
            self.update_db(task_status=InstanceTasks.NONE)

            return True

        LOG.debug(
            "Configuration changes include non-dynamic settings and "
            "will require restart to take effect.")

        return False

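Taken together, the instance-side attach path decomposes as: attach_configuration() validates and resolves the group, save_configuration() persists the overrides on the guest and flips the task to RESTART_REQUIRED, and apply_configuration() applies dynamic values and releases the task when no restart is needed. A condensed sketch of what the instance API layer now triggers (see the InstanceController change later in this diff):

    instance = Instance.load(context, instance_id)
    instance.attach_configuration(configuration_id)
    # -> Configuration.find(...)
    # -> save_configuration(config)   # guest overrides persisted, RESTART_REQUIRED
    # -> apply_configuration(config)  # True if applied at runtime, task released
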
    def detach_configuration(self):
        LOG.debug("Detaching configuration from instance: %s", self.id)
        if self.configuration and self.configuration.id:
            LOG.debug("Unassigning the configuration id %s.",
                      self.configuration.id)
            self._validate_can_perform_assign()
            LOG.debug("Detaching configuration: %s", self.configuration.id)
            self.remove_configuration()
        else:
            LOG.debug("No configuration found on instance.")

            self.guest.update_overrides({}, remove=True)
    def remove_configuration(self):
        configuration_id = self.delete_configuration()
        return self.reset_configuration(configuration_id)

        # Dynamically reset the configuration values back to their default
        # values from the configuration template.
        # Reset the values only if the default is available for all of
        # them and restart is not required by any.
        # Mark the instance with a 'RESTART_REQUIRED' status otherwise.
    def delete_configuration(self):
        """Remove configuration changes from the guest.
        Update Trove records if successful.
        This method does not update runtime values. It sets the instance task
        to RESTART_REQUIRED.
        Return ID of the removed configuration group.
        """
        LOG.debug("Deleting configuration from instance: %s", self.id)
        configuration_id = self.configuration.id

        LOG.debug("Removing configuration from the guest.")
        self.guest.update_overrides({}, remove=True)
        LOG.debug("Configuration has been removed from the guest.")

        self.update_db(task_status=InstanceTasks.RESTART_REQUIRED,
                       configuration_id=None)

        return configuration_id

    def reset_configuration(self, configuration_id):
        """Dynamically reset the configuration values back to their default
        values from the configuration template and release the
        RESTART_REQUIRED task.
        Reset the values only if the default is available for all of
        them and restart is not required by any.
        Return True if the configuration has changed.
        """

        LOG.debug("Resetting configuration on instance: %s", self.id)
        if configuration_id:
            flavor = self.get_flavor()
            default_config = self._render_config_dict(flavor)
            current_config = Configuration(self.context, self.configuration.id)
            current_config = Configuration(self.context, configuration_id)
            current_overrides = current_config.get_configuration_overrides()
            # Check the configuration template has defaults for all modified
            # values.
@@ -1323,56 +1413,22 @@ class Instance(BuiltInstance):
                                        for key in current_overrides.keys())
            if (not current_config.does_configuration_need_restart() and
                    has_defaults_for_all):
                LOG.debug("Applying runtime configuration changes.")
                self.guest.apply_overrides(
                    {k: v for k, v in default_config.items()
                     if k in current_overrides})
                LOG.debug("Configuration has been applied.")
                self.update_db(task_status=InstanceTasks.NONE)

                return True
            else:
                LOG.debug(
                    "Could not revert all configuration changes dynamically. "
                    "A restart will be required.")
                self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
        else:
            LOG.debug("No configuration found on instance. Skipping.")
            LOG.debug("There are no values to reset.")

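The detach path is symmetric: detach_configuration() guards on an attached group, then remove_configuration() chains delete_configuration() (clear the overrides on the guest, set RESTART_REQUIRED, drop the DB reference) with reset_configuration() (re-apply template defaults dynamically when every changed value has a default and none requires restart). A short sketch of that composition:

    instance.detach_configuration()
    # -> remove_configuration()
    #    -> configuration_id = delete_configuration()  # guest cleared, RESTART_REQUIRED
    #    -> reset_configuration(configuration_id)      # defaults re-applied if possible
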
    def assign_configuration(self, configuration_id):
        self._validate_can_perform_assign()

        try:
            configuration = Configuration.load(self.context, configuration_id)
        except exception.ModelNotFoundError:
            raise exception.NotFound(
                message='Configuration group id: %s could not be found.'
                % configuration_id)

        config_ds_v = configuration.datastore_version_id
        inst_ds_v = self.db_info.datastore_version_id
        if (config_ds_v != inst_ds_v):
            raise exception.ConfigurationDatastoreNotMatchInstance(
                config_datastore_version=config_ds_v,
                instance_datastore_version=inst_ds_v)

        config = Configuration(self.context, configuration.id)
        LOG.debug("Config is %s.", config)

        self.update_overrides(config)
        self.update_db(configuration_id=configuration.id)

    def update_overrides(self, config):
        LOG.debug("Updating or removing overrides for instance %s.", self.id)

        overrides = config.get_configuration_overrides()
        self.guest.update_overrides(overrides)

        # Apply the new configuration values dynamically to the running
        # datastore service.
        # Apply overrides only if ALL values can be applied at once or mark
        # the instance with a 'RESTART_REQUIRED' status.
        if not config.does_configuration_need_restart():
            self.guest.apply_overrides(overrides)
        else:
            LOG.debug("Configuration overrides has non-dynamic settings and "
                      "will require restart to take effect.")
            self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
        return False

    def _render_config_dict(self, flavor):
        config = template.SingleInstanceConfigTemplate(
@@ -389,13 +389,13 @@ class InstanceController(wsgi.Controller):
                configuration_id = kwargs['configuration_id']
                with StartNotification(context, instance_id=instance.id,
                                       configuration_id=configuration_id):
                    instance.assign_configuration(configuration_id)
                    instance.attach_configuration(configuration_id)
            else:
                context.notification = (
                    notification.DBaaSInstanceDetachConfiguration(context,
                                                                  request=req))
                with StartNotification(context, instance_id=instance.id):
                    instance.unassign_configuration()
                    instance.detach_configuration()
        if 'datastore_version' in kwargs:
            datastore_version = datastore_models.DatastoreVersion.load(
                instance.datastore, kwargs['datastore_version'])
@@ -389,15 +389,6 @@ class Manager(periodic_task.PeriodicTasks):
            with EndNotification(context):
                instance_tasks.upgrade(datastore_version)

    def update_overrides(self, context, instance_id, overrides):
        instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
        instance_tasks.update_overrides(overrides)

    def unassign_configuration(self, context, instance_id, flavor,
                               configuration_id):
        instance_tasks = models.BuiltInstanceTasks.load(context, instance_id)
        instance_tasks.unassign_configuration(flavor, configuration_id)

    def create_cluster(self, context, cluster_id):
        with EndNotification(context, cluster_id=cluster_id):
            cluster_tasks = models.load_cluster_tasks(context, cluster_id)
@@ -168,6 +168,12 @@ cluster_restart_groups.extend([groups.CLUSTER_ACTIONS_RESTART_WAIT])
cluster_upgrade_groups = list(cluster_create_groups)
cluster_upgrade_groups.extend([groups.CLUSTER_UPGRADE_WAIT])

cluster_config_groups = list(cluster_create_groups)
cluster_config_groups.extend([groups.CLUSTER_CFGGRP_DELETE])

cluster_config_actions_groups = list(cluster_config_groups)
cluster_config_actions_groups.extend([groups.CLUSTER_ACTIONS_CFGGRP_ACTIONS])

cluster_groups = list(cluster_actions_groups)
cluster_groups.extend([cluster_group.GROUP])

@@ -254,6 +260,8 @@ register(["cluster_restart"], cluster_restart_groups)
register(["cluster_root"], cluster_root_groups)
register(["cluster_root_actions"], cluster_root_actions_groups)
register(["cluster_upgrade"], cluster_upgrade_groups)
register(["cluster_config"], cluster_config_groups)
register(["cluster_config_actions"], cluster_config_actions_groups)
register(["common"], common_groups)
register(["configuration"], configuration_groups)
register(["configuration_create"], configuration_create_groups)
@@ -294,7 +302,8 @@ register(
        user_actions_groups, ],
    multi=[cluster_actions_groups,
           cluster_negative_actions_groups,
           cluster_root_actions_groups, ]
           cluster_root_actions_groups,
           cluster_config_actions_groups, ]
)

register(
@@ -51,7 +51,10 @@ CFGGRP_INST_DELETE_WAIT = "scenario.cfggrp_inst_delete_wait_grp"


# Cluster Actions Group
CLUSTER_CFGGRP_CREATE = "scenario.cluster_actions_cfggrp_create_grp"
CLUSTER_CFGGRP_DELETE = "scenario.cluster_actions_cfggrp_delete_grp"
CLUSTER_ACTIONS = "scenario.cluster_actions_grp"
CLUSTER_ACTIONS_CFGGRP_ACTIONS = "scenario.cluster_actions_cfggrp_actions_grp"
CLUSTER_ACTIONS_ROOT_ENABLE = "scenario.cluster_actions_root_enable_grp"
CLUSTER_ACTIONS_ROOT_ACTIONS = "scenario.cluster_actions_root_actions_grp"
CLUSTER_ACTIONS_ROOT_GROW = "scenario.cluster_actions_root_grow_grp"
@@ -29,7 +29,7 @@ class ClusterRunnerFactory(test_runners.RunnerFactory):
    _runner_cls = 'ClusterRunner'


@test(groups=[GROUP, groups.CLUSTER_CREATE],
@test(groups=[GROUP, groups.CLUSTER_CFGGRP_CREATE],
      runs_after_groups=[groups.MODULE_DELETE,
                         groups.CFGGRP_INST_DELETE,
                         groups.INST_ACTIONS_RESIZE_WAIT,
@@ -39,6 +39,20 @@ class ClusterRunnerFactory(test_runners.RunnerFactory):
                         groups.ROOT_ACTION_INST_DELETE,
                         groups.REPL_INST_DELETE_WAIT,
                         groups.INST_DELETE])
class ClusterConfigurationCreateGroup(TestGroup):

    def __init__(self):
        super(ClusterConfigurationCreateGroup, self).__init__(
            ClusterRunnerFactory.instance())

    @test
    def create_initial_configuration(self):
        """Create a configuration group for a new cluster."""
        self.test_runner.run_initial_configuration_create()


@test(groups=[GROUP, groups.CLUSTER_ACTIONS, groups.CLUSTER_CREATE],
      runs_after_groups=[groups.CLUSTER_CFGGRP_CREATE])
class ClusterCreateGroup(TestGroup):

    def __init__(self):
@@ -70,6 +84,11 @@ class ClusterCreateWaitGroup(TestGroup):
        """Wait for cluster create to complete."""
        self.test_runner.run_cluster_create_wait()

    @test(depends_on=[cluster_create_wait])
    def verify_initial_configuration(self):
        """Verify initial configuration values on the cluster."""
        self.test_runner.run_verify_initial_configuration()

    @test(depends_on=[cluster_create_wait])
    def add_initial_cluster_data(self):
        """Add data to cluster."""
@@ -178,6 +197,11 @@ class ClusterGrowWaitGroup(TestGroup):
        """Wait for cluster grow to complete."""
        self.test_runner.run_cluster_grow_wait()

    @test(depends_on=[cluster_grow_wait])
    def verify_initial_configuration(self):
        """Verify initial configuration values on the cluster."""
        self.test_runner.run_verify_initial_configuration()

    @test(depends_on=[cluster_grow_wait])
    def verify_initial_cluster_data_after_grow(self):
        """Verify the initial data still exists after cluster grow."""
@@ -245,6 +269,11 @@ class ClusterUpgradeWaitGroup(TestGroup):
        """Wait for cluster upgrade to complete."""
        self.test_runner.run_cluster_upgrade_wait()

    @test(depends_on=[cluster_upgrade_wait])
    def verify_initial_configuration(self):
        """Verify initial configuration values on the cluster."""
        self.test_runner.run_verify_initial_configuration()

    @test(depends_on=[cluster_upgrade_wait])
    def verify_initial_cluster_data_after_upgrade(self):
        """Verify the initial data still exists after cluster upgrade."""
@@ -298,6 +327,11 @@ class ClusterShrinkWaitGroup(TestGroup):
        """Wait for the cluster shrink to complete."""
        self.test_runner.run_cluster_shrink_wait()

    @test(depends_on=[cluster_shrink_wait])
    def verify_initial_configuration(self):
        """Verify initial configuration values on the cluster."""
        self.test_runner.run_verify_initial_configuration()

    @test(depends_on=[cluster_shrink_wait])
    def verify_initial_cluster_data_after_shrink(self):
        """Verify the initial data still exists after cluster shrink."""
@@ -336,6 +370,93 @@ class ClusterRootEnableShrinkGroup(TestGroup):
        self.test_runner.run_verify_cluster_root_enable()


@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
              groups.CLUSTER_ACTIONS_CFGGRP_ACTIONS],
      depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
      runs_after_groups=[groups.CLUSTER_ACTIONS_ROOT_SHRINK])
class ClusterConfigurationActionsGroup(TestGroup):

    def __init__(self):
        super(ClusterConfigurationActionsGroup, self).__init__(
            ClusterRunnerFactory.instance())

    @test
    def detach_initial_configuration(self):
        """Detach initial configuration group."""
        self.test_runner.run_detach_initial_configuration()

    @test(depends_on=[detach_initial_configuration])
    def restart_cluster_after_detach(self):
        """Restarting cluster after configuration change."""
        self.test_runner.restart_after_configuration_change()

    @test
    def create_dynamic_configuration(self):
        """Create a configuration group with only dynamic entries."""
        self.test_runner.run_create_dynamic_configuration()

    @test
    def create_non_dynamic_configuration(self):
        """Create a configuration group with only non-dynamic entries."""
        self.test_runner.run_create_non_dynamic_configuration()

    @test(depends_on=[create_dynamic_configuration,
                      restart_cluster_after_detach])
    def attach_dynamic_configuration(self):
        """Test attach dynamic group."""
        self.test_runner.run_attach_dynamic_configuration()

    @test(depends_on=[attach_dynamic_configuration])
    def verify_dynamic_configuration(self):
        """Verify dynamic values on the cluster."""
        self.test_runner.run_verify_dynamic_configuration()

    @test(depends_on=[attach_dynamic_configuration],
          runs_after=[verify_dynamic_configuration])
    def detach_dynamic_configuration(self):
        """Test detach dynamic group."""
        self.test_runner.run_detach_dynamic_configuration()

    @test(depends_on=[create_non_dynamic_configuration,
                      detach_initial_configuration],
          runs_after=[detach_dynamic_configuration])
    def attach_non_dynamic_configuration(self):
        """Test attach non-dynamic group."""
        self.test_runner.run_attach_non_dynamic_configuration()

    @test(depends_on=[attach_non_dynamic_configuration])
    def restart_cluster_after_attach(self):
        """Restarting cluster after configuration change."""
        self.test_runner.restart_after_configuration_change()

    @test(depends_on=[restart_cluster_after_attach])
    def verify_non_dynamic_configuration(self):
        """Verify non-dynamic values on the cluster."""
        self.test_runner.run_verify_non_dynamic_configuration()

    @test(depends_on=[attach_non_dynamic_configuration],
          runs_after=[verify_non_dynamic_configuration])
    def detach_non_dynamic_configuration(self):
        """Test detach non-dynamic group."""
        self.test_runner.run_detach_non_dynamic_configuration()

    @test(runs_after=[detach_dynamic_configuration,
                      detach_non_dynamic_configuration])
    def verify_initial_cluster_data(self):
        """Verify the initial data still exists."""
        self.test_runner.run_verify_initial_cluster_data()

    @test(depends_on=[detach_dynamic_configuration])
    def delete_dynamic_configuration(self):
        """Test delete dynamic configuration group."""
        self.test_runner.run_delete_dynamic_configuration()

    @test(depends_on=[detach_non_dynamic_configuration])
    def delete_non_dynamic_configuration(self):
        """Test delete non-dynamic configuration group."""
        self.test_runner.run_delete_non_dynamic_configuration()


@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
              groups.CLUSTER_DELETE],
      depends_on_groups=[groups.CLUSTER_CREATE_WAIT],
@@ -345,7 +466,9 @@ class ClusterRootEnableShrinkGroup(TestGroup):
                         groups.CLUSTER_ACTIONS_GROW_WAIT,
                         groups.CLUSTER_ACTIONS_SHRINK_WAIT,
                         groups.CLUSTER_UPGRADE_WAIT,
                         groups.CLUSTER_ACTIONS_RESTART_WAIT])
                         groups.CLUSTER_ACTIONS_RESTART_WAIT,
                         groups.CLUSTER_CFGGRP_CREATE,
                         groups.CLUSTER_ACTIONS_CFGGRP_ACTIONS])
class ClusterDeleteGroup(TestGroup):

    def __init__(self):
@@ -376,3 +499,19 @@ class ClusterDeleteWaitGroup(TestGroup):
    def cluster_delete_wait(self):
        """Wait for the existing cluster to be gone."""
        self.test_runner.run_cluster_delete_wait()


@test(groups=[GROUP, groups.CLUSTER_ACTIONS,
              groups.CLUSTER_CFGGRP_DELETE],
      depends_on_groups=[groups.CLUSTER_CFGGRP_CREATE],
      runs_after_groups=[groups.CLUSTER_DELETE_WAIT])
class ClusterConfigurationDeleteGroup(TestGroup):

    def __init__(self):
        super(ClusterConfigurationDeleteGroup, self).__init__(
            ClusterRunnerFactory.instance())

    @test
    def delete_initial_configuration(self):
        """Delete initial configuration group."""
        self.test_runner.run_delete_initial_configuration()

@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.

import json
import os

from proboscis import SkipTest
@@ -49,6 +50,11 @@ class ClusterRunner(TestRunner):
        self.initial_instance_count = None
        self.cluster_instances = None
        self.cluster_removed_instances = None
        self.active_config_group_id = None
        self.config_requires_restart = False
        self.initial_group_id = None
        self.dynamic_group_id = None
        self.non_dynamic_group_id = None

    @property
    def is_using_existing_cluster(self):
@@ -62,6 +68,15 @@ class ClusterRunner(TestRunner):
    def min_cluster_node_count(self):
        return 2

    def run_initial_configuration_create(self, expected_http_code=200):
        group_id, requires_restart = self.create_initial_configuration(
            expected_http_code)
        if group_id:
            self.initial_group_id = group_id
            self.config_requires_restart = requires_restart
        else:
            raise SkipTest("No groups defined.")

    def run_cluster_create(self, num_nodes=None, expected_task_name='BUILDING',
                           expected_http_code=200):
        self.cluster_count_before_create = len(
@@ -84,11 +99,11 @@ class ClusterRunner(TestRunner):

        self.cluster_id = self.assert_cluster_create(
            self.cluster_name, instance_defs, self.locality,
            expected_task_name, expected_http_code)
            self.initial_group_id, expected_task_name, expected_http_code)

    def assert_cluster_create(
            self, cluster_name, instances_def, locality, expected_task_name,
            expected_http_code):
            self, cluster_name, instances_def, locality, configuration,
            expected_task_name, expected_http_code):

        self.report.log("Testing cluster create: %s" % cluster_name)

@@ -100,8 +115,10 @@ class ClusterRunner(TestRunner):
        cluster = client.clusters.create(
            cluster_name, self.instance_info.dbaas_datastore,
            self.instance_info.dbaas_datastore_version,
            instances=instances_def, locality=locality)
            instances=instances_def, locality=locality,
            configuration=configuration)
        self.assert_client_code(client, expected_http_code)
        self.active_config_group_id = configuration
        self._assert_cluster_values(cluster, expected_task_name)
        for instance in cluster.instances:
            self.register_debug_inst_ids(instance['id'])
@@ -202,7 +219,8 @@ class ClusterRunner(TestRunner):
        self.current_root_creds = client.root.create_cluster_root(
            self.cluster_id, root_credentials['password'])
        self.assert_client_code(client, expected_http_code)
        self._assert_cluster_response(client, cluster_id, expected_task_name)
        self._assert_cluster_response(
            client, self.cluster_id, expected_task_name)
        self.assert_equal(root_credentials['name'],
                          self.current_root_creds[0])
        self.assert_equal(root_credentials['password'],
@@ -506,6 +524,8 @@ class ClusterRunner(TestRunner):
            check.has_field("updated", six.text_type)
            if check_locality:
                check.has_field("locality", six.text_type)
            if self.active_config_group_id:
                check.has_field("configuration", six.text_type)
        for instance in cluster.instances:
            isinstance(instance, dict)
            self.assert_is_not_none(instance['id'])
@@ -528,6 +548,214 @@ class ClusterRunner(TestRunner):
        except exceptions.NotFound:
            self.assert_client_code(client, 404)

    def restart_after_configuration_change(self):
        if self.config_requires_restart:
            self.run_cluster_restart()
            self.run_cluster_restart_wait()
            self.config_requires_restart = False
        else:
            raise SkipTest("Not required.")

    def run_create_dynamic_configuration(self, expected_http_code=200):
        values = self.test_helper.get_dynamic_group()
        if values:
            self.dynamic_group_id = self.assert_create_group(
                'dynamic_cluster_test_group',
                'a fully dynamic group should not require restart',
                values, expected_http_code)
        elif values is None:
            raise SkipTest("No dynamic group defined in %s." %
                           self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no dynamic configuration values.")

    def assert_create_group(self, name, description, values,
                            expected_http_code):
        json_def = json.dumps(values)
        client = self.auth_client
        result = client.configurations.create(
            name,
            json_def,
            description,
            datastore=self.instance_info.dbaas_datastore,
            datastore_version=self.instance_info.dbaas_datastore_version)
        self.assert_client_code(client, expected_http_code)

        return result.id

    def run_create_non_dynamic_configuration(self, expected_http_code=200):
        values = self.test_helper.get_non_dynamic_group()
        if values:
            self.non_dynamic_group_id = self.assert_create_group(
                'non_dynamic_cluster_test_group',
                'a group containing non-dynamic properties should always '
                'require restart',
                values, expected_http_code)
        elif values is None:
            raise SkipTest("No non-dynamic group defined in %s." %
                           self.test_helper.get_class_name())
        else:
            raise SkipTest("Datastore has no non-dynamic configuration "
                           "values.")

    def run_attach_dynamic_configuration(
            self, expected_states=['NONE'],
            expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_attach_configuration(
                self.cluster_id, self.dynamic_group_id, expected_states,
                expected_http_code)

    def assert_attach_configuration(
            self, cluster_id, group_id, expected_states, expected_http_code,
            restart_inst=False):
        client = self.auth_client
        client.clusters.configuration_attach(cluster_id, group_id)
        self.assert_client_code(client, expected_http_code)
        self.active_config_group_id = group_id
        self._assert_cluster_states(client, cluster_id, expected_states)
        self.assert_configuration_group(client, cluster_id, group_id)

        if restart_inst:
            self.config_requires_restart = True
            cluster_instances = self._get_cluster_instances(cluster_id)
            for node in cluster_instances:
                self.assert_equal(
                    'RESTART_REQUIRED', node.status,
                    "Node '%s' should be in 'RESTART_REQUIRED' state."
                    % node.id)

    def assert_configuration_group(
            self, client, cluster_id, expected_group_id):
        cluster = client.clusters.get(cluster_id)
        self.assert_equal(
            expected_group_id, cluster.configuration,
            "Attached group does not have the expected ID.")

        cluster_instances = self._get_cluster_instances(client, cluster_id)
        for node in cluster_instances:
            self.assert_equal(
                expected_group_id, cluster.configuration,
                "Attached group does not have the expected ID on "
                "cluster node: %s" % node.id)

    def run_attach_non_dynamic_configuration(
            self, expected_states=['NONE'],
            expected_http_code=202):
        if self.non_dynamic_group_id:
            self.assert_attach_configuration(
                self.cluster_id, self.non_dynamic_group_id,
                expected_states, expected_http_code, restart_inst=True)

    def run_verify_initial_configuration(self):
        if self.initial_group_id:
            self.verify_configuration(self.cluster_id, self.initial_group_id)

    def verify_configuration(self, cluster_id, expected_group_id):
        self.assert_configuration_group(cluster_id, expected_group_id)
        self.assert_configuration_values(cluster_id, expected_group_id)

    def assert_configuration_values(self, cluster_id, group_id):
        if group_id == self.initial_group_id:
            if not self.config_requires_restart:
                expected_configs = self.test_helper.get_dynamic_group()
            else:
                expected_configs = self.test_helper.get_non_dynamic_group()
        if group_id == self.dynamic_group_id:
            expected_configs = self.test_helper.get_dynamic_group()
        elif group_id == self.non_dynamic_group_id:
            expected_configs = self.test_helper.get_non_dynamic_group()

        self._assert_configuration_values(cluster_id, expected_configs)

    def _assert_configuration_values(self, cluster_id, expected_configs):
        cluster_instances = self._get_cluster_instances(cluster_id)
        for node in cluster_instances:
            host = self.get_instance_host(node)
            self.report.log(
                "Verifying cluster configuration via node: %s" % host)
            for name, value in expected_configs.items():
                actual = self.test_helper.get_configuration_value(name, host)
                self.assert_equal(str(value), str(actual),
                                  "Unexpected value of property '%s'" % name)

    def run_verify_dynamic_configuration(self):
        if self.dynamic_group_id:
            self.verify_configuration(self.cluster_id, self.dynamic_group_id)

    def run_verify_non_dynamic_configuration(self):
        if self.non_dynamic_group_id:
            self.verify_configuration(
                self.cluster_id, self.non_dynamic_group_id)

    def run_detach_initial_configuration(self, expected_states=['NONE'],
                                         expected_http_code=202):
        if self.initial_group_id:
            self.assert_detach_configuration(
                self.cluster_id, expected_states, expected_http_code,
                restart_inst=self.config_requires_restart)

    def run_detach_dynamic_configuration(self, expected_states=['NONE'],
                                         expected_http_code=202):
        if self.dynamic_group_id:
            self.assert_detach_configuration(
                self.cluster_id, expected_states, expected_http_code)

    def assert_detach_configuration(
            self, cluster_id, expected_states, expected_http_code,
            restart_inst=False):
        client = self.auth_client
        client.clusters.configuration_detach(cluster_id)
        self.assert_client_code(client, expected_http_code)
        self.active_config_group_id = None
        self._assert_cluster_states(client, cluster_id, expected_states)
|
||||
cluster = client.clusters.get(cluster_id)
|
||||
self.assert_false(
|
||||
hasattr(cluster, 'configuration'),
|
||||
"Configuration group was not detached from the cluster.")
|
||||
|
||||
cluster_instances = self._get_cluster_instances(client, cluster_id)
|
||||
for node in cluster_instances:
|
||||
self.assert_false(
|
||||
hasattr(node, 'configuration'),
|
||||
"Configuration group was not detached from cluster node: %s"
|
||||
% node.id)
|
||||
|
||||
if restart_inst:
|
||||
self.config_requires_restart = True
|
||||
cluster_instances = self._get_cluster_instances(client, cluster_id)
|
||||
for node in cluster_instances:
|
||||
self.assert_equal(
|
||||
'RESTART_REQUIRED', node.status,
|
||||
"Node '%s' should be in 'RESTART_REQUIRED' state."
|
||||
% node.id)
|
||||
|
||||
def run_detach_non_dynamic_configuration(
|
||||
self, expected_states=['NONE'],
|
||||
expected_http_code=202):
|
||||
if self.non_dynamic_group_id:
|
||||
self.assert_detach_configuration(
|
||||
self.cluster_id, expected_states, expected_http_code,
|
||||
restart_inst=True)
|
||||
|
||||
def run_delete_initial_configuration(self, expected_http_code=202):
|
||||
if self.initial_group_id:
|
||||
self.assert_group_delete(self.initial_group_id, expected_http_code)
|
||||
|
||||
def assert_group_delete(self, group_id, expected_http_code):
|
||||
client = self.auth_client
|
||||
client.configurations.delete(group_id)
|
||||
self.assert_client_code(client, expected_http_code)
|
||||
|
||||
def run_delete_dynamic_configuration(self, expected_http_code=202):
|
||||
if self.dynamic_group_id:
|
||||
self.assert_group_delete(self.dynamic_group_id, expected_http_code)
|
||||
|
||||
def run_delete_non_dynamic_configuration(self, expected_http_code=202):
|
||||
if self.non_dynamic_group_id:
|
||||
self.assert_group_delete(self.non_dynamic_group_id,
|
||||
expected_http_code)
|
||||
|
||||
|
||||
class CassandraClusterRunner(ClusterRunner):
|
||||
|
||||
|
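The runner methods above drive the new cluster configuration API end to end. As a rough sketch of that flow, assuming a python-troveclient handle equivalent to the runners' auth_client and an existing cluster_id; the group name, settings, and datastore values below are illustrative, not taken from this change:

# Sketch only: attach/detach round trip for a cluster configuration group,
# assuming `client` (troveclient) and `cluster_id` already exist.
import json

values = {'max_connections': 300}           # hypothetical setting
group = client.configurations.create(
    'example_cluster_group',                # name
    json.dumps(values),                     # settings serialized to JSON
    'example description',
    datastore='mysql',                      # illustrative datastore
    datastore_version='5.7')
client.clusters.configuration_attach(cluster_id, group.id)
cluster = client.clusters.get(cluster_id)
assert cluster.configuration == group.id    # group reported on the cluster
client.clusters.configuration_detach(cluster_id)
client.configurations.delete(group.id)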
@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

import json

from proboscis import SkipTest

from trove.tests.config import CONFIG
@ -62,21 +60,9 @@ class InstanceCreateRunner(TestRunner):
        self.instance_info.helper_database = instance_info.helper_database

    def run_initial_configuration_create(self, expected_http_code=200):
        dynamic_config = self.test_helper.get_dynamic_group()
        non_dynamic_config = self.test_helper.get_non_dynamic_group()
        values = dynamic_config or non_dynamic_config
        if values:
            json_def = json.dumps(values)
            client = self.auth_client
            result = client.configurations.create(
                'initial_configuration_for_instance_create',
                json_def,
                "Configuration group used by instance create tests.",
                datastore=self.instance_info.dbaas_datastore,
                datastore_version=self.instance_info.dbaas_datastore_version)
            self.assert_client_code(client, expected_http_code)

            self.config_group_id = result.id
        group_id, _ = self.create_initial_configuration(expected_http_code)
        if group_id:
            self.config_group_id = group_id
        else:
            raise SkipTest("No groups defined.")
@ -15,6 +15,7 @@

import datetime
import inspect
import json
import netaddr
import os
import proboscis
@ -903,6 +904,25 @@ class TestRunner(object):
        full_list = client.databases.list(instance_id)
        return {database.name: database for database in full_list}

    def create_initial_configuration(self, expected_http_code):
        client = self.auth_client
        dynamic_config = self.test_helper.get_dynamic_group()
        non_dynamic_config = self.test_helper.get_non_dynamic_group()
        values = dynamic_config or non_dynamic_config
        if values:
            json_def = json.dumps(values)
            result = client.configurations.create(
                'initial_configuration_for_create_tests',
                json_def,
                "Configuration group used by create tests.",
                datastore=self.instance_info.dbaas_datastore,
                datastore_version=self.instance_info.dbaas_datastore_version)
            self.assert_client_code(client, expected_http_code)

            return (result.id, dynamic_config is None)

        return (None, False)


class CheckInstance(AttrCheck):
    """Class to check various attributes of Instance details."""
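The helper above returns the new group's ID together with a flag that is True only when no dynamic group was available, i.e. the configuration takes effect only after a restart. A minimal sketch of how a runner might consume it, reusing the attribute names from the cluster runner earlier in this change; the wiring itself is illustrative:

# Sketch only: consuming create_initial_configuration() from a runner's setup.
group_id, requires_restart = self.create_initial_configuration(200)
if group_id:
    self.initial_group_id = group_id
    # a non-dynamic group only takes effect after the cluster restarts
    self.config_requires_restart = requires_restart
else:
    raise SkipTest("No groups defined.")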
@ -53,7 +53,7 @@ class ClusterTest(trove_testtools.TestCase):
        CassandraCluster._create_cluster_instances(
            self.context, 'test_cluster_id', 'test_cluster',
            datastore, datastore_version,
            test_instances, None, None)
            test_instances, None, None, None)

        check_quotas.assert_called_once_with(
            ANY, instances=num_instances, volumes=get_vol_size.return_value)

@ -81,8 +81,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            [],
            None, None
        )
            None, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_unequal_flavors(self, mock_client):
@ -95,8 +94,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_unequal_volumes(self,
@ -112,8 +110,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_storage_not_specified(self,
@ -142,8 +139,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch('trove.cluster.models.LOG')
    def test_delete_bad_task_status(self, mock_logging):

@ -195,7 +195,7 @@ class TestClusterController(trove_testtools.TestCase):
        mock_cluster_create.assert_called_with(context, 'products',
            datastore, datastore_version,
            instances, {},
            self.locality)
            self.locality, None)

    @patch.object(Cluster, 'load')
    def test_show_cluster(self,

@ -159,7 +159,7 @@ class TestClusterController(trove_testtools.TestCase):
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
            datastore, datastore_version,
            instances, {}, None)
            instances, {}, None, None)

    @patch.object(Cluster, 'load')
    def test_show_cluster(self,

@ -196,7 +196,7 @@ class TestClusterController(trove_testtools.TestCase):
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
            datastore, datastore_version,
            instances, {}, None)
            instances, {}, None, None)

    @patch.object(Cluster, 'load')
    def test_show_cluster(self,

@ -159,7 +159,7 @@ class TestClusterController(trove_testtools.TestCase):
        self.controller.create(req, body, tenant_id)
        mock_cluster_create.assert_called_with(context, 'products',
            datastore, datastore_version,
            instances, {}, None)
            instances, {}, None, None)

    @patch.object(Cluster, 'load')
    def test_show_cluster(self,

@ -83,8 +83,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            [], {}, None
        )
            [], {}, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_flavor_not_specified(self, mock_client):
@ -96,8 +95,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None
        )
            instances, {}, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_invalid_flavor_specified(self,
@ -116,8 +114,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None
        )
            instances, {}, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_volume_no_specified(self,
@ -132,8 +129,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None
        )
            instances, {}, None, None)

    @patch.object(remote, 'create_nova_client')
    @patch.object(galera_api, 'CONF')
@ -152,8 +148,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None
        )
            instances, {}, None, None)

    @patch.object(remote, 'create_nova_client')
    @patch.object(galera_api, 'CONF')
@ -184,8 +179,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None
        )
            instances, {}, None, None)

    @patch.object(remote, 'create_nova_client')
    def test_create_volume_not_equal(self, mock_client):
@ -199,8 +193,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None
        )
            instances, {}, None, None)

    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(inst_models.Instance, 'create')
@ -219,7 +212,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None)
            instances, {}, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)
@ -241,7 +234,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None)
            instances, {}, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(4, mock_ins_create.call_count)
@ -279,7 +272,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            instances, {}, None)
            instances, {}, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)

@ -91,7 +91,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            self.instances_w_volumes,
            {}, None)
            {}, None, None)

    @patch.object(remote, 'create_nova_client')
    @patch.object(redis_api, 'CONF')
@ -105,7 +105,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            self.instances_no_volumes,
            {}, None)
            {}, None, None)

    @patch.object(remote, 'create_nova_client')
    @patch.object(redis_api, 'CONF')
@ -122,7 +122,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            self.instances_w_volumes,
            {}, None)
            {}, None, None)

    @patch.object(remote, 'create_nova_client')
    @patch.object(redis_api, 'CONF')
@ -151,7 +151,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            self.instances_no_volumes,
            {}, None)
            {}, None, None)

    @patch.object(redis_api, 'CONF')
    @patch.object(inst_models.Instance, 'create')
@ -167,7 +167,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            self.instances_w_volumes, {}, None)
            self.instances_w_volumes, {}, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            self.dbcreate_mock.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)
@ -199,7 +199,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            self.instances_no_volumes, {}, None)
            self.instances_no_volumes, {}, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            self.dbcreate_mock.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)

@ -80,7 +80,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.cluster_name,
            self.datastore,
            self.datastore_version,
            [], None, None)
            [], None, None, None)

    @patch.object(DBCluster, 'create')
    @patch.object(inst_models.DBInstance, 'find_all')
@ -95,8 +95,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(DBCluster, 'create')
    @patch.object(inst_models.DBInstance, 'find_all')
@ -118,8 +117,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(DBCluster, 'create')
    @patch.object(inst_models.DBInstance, 'find_all')
@ -137,8 +135,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(DBCluster, 'create')
    @patch.object(inst_models.DBInstance, 'find_all')
@ -162,8 +159,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(DBCluster, 'create')
    @patch.object(inst_models.DBInstance, 'find_all')
@ -199,8 +195,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(DBCluster, 'create')
    @patch.object(inst_models.DBInstance, 'find_all')
@ -218,8 +213,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None
        )
            None, None, None)

    @patch.object(inst_models.DBInstance, 'find_all')
    @patch.object(inst_models.Instance, 'create')
@ -237,7 +231,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None)
            None, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)
@ -276,7 +270,7 @@ class ClusterTest(trove_testtools.TestCase):
            self.datastore,
            self.datastore_version,
            instances,
            None, None)
            None, None, None)
        mock_task_api.return_value.create_cluster.assert_called_with(
            mock_db_create.return_value.id)
        self.assertEqual(3, mock_ins_create.call_count)

@ -257,8 +257,8 @@ class TestInstanceController(trove_testtools.TestCase):
    def _setup_modify_instance_mocks(self):
        instance = Mock()
        instance.detach_replica = Mock()
        instance.assign_configuration = Mock()
        instance.unassign_configuration = Mock()
        instance.attach_configuration = Mock()
        instance.detach_configuration = Mock()
        instance.update_db = Mock()
        return instance

@ -270,8 +270,8 @@ class TestInstanceController(trove_testtools.TestCase):
            instance, **args)

        self.assertEqual(0, instance.detach_replica.call_count)
        self.assertEqual(0, instance.unassign_configuration.call_count)
        self.assertEqual(0, instance.assign_configuration.call_count)
        self.assertEqual(0, instance.detach_configuration.call_count)
        self.assertEqual(0, instance.attach_configuration.call_count)
        self.assertEqual(0, instance.update_db.call_count)

    def test_modify_instance_with_nonempty_args_calls_update_db(self):
@ -312,7 +312,7 @@ class TestInstanceController(trove_testtools.TestCase):
        self.controller._modify_instance(self.context, self.req,
            instance, **args)

        self.assertEqual(1, instance.assign_configuration.call_count)
        self.assertEqual(1, instance.attach_configuration.call_count)

    def test_modify_instance_with_None_configuration_id_arg(self):
        instance = self._setup_modify_instance_mocks()
@ -322,7 +322,7 @@ class TestInstanceController(trove_testtools.TestCase):
        self.controller._modify_instance(self.context, self.req,
            instance, **args)

        self.assertEqual(1, instance.unassign_configuration.call_count)
        self.assertEqual(1, instance.detach_configuration.call_count)

    def test_modify_instance_with_all_args(self):
        instance = self._setup_modify_instance_mocks()
@ -334,5 +334,5 @@ class TestInstanceController(trove_testtools.TestCase):
            instance, **args)

        self.assertEqual(1, instance.detach_replica.call_count)
        self.assertEqual(1, instance.assign_configuration.call_count)
        self.assertEqual(1, instance.attach_configuration.call_count)
        instance.update_db.assert_called_once_with(**args)
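The renamed mocks and call-count assertions above pin down the expected controller dispatch: attach_configuration when a configuration ID is supplied, detach_configuration when the ID is explicitly None, and update_db with whatever arguments were passed. Below is a sketch of a _modify_instance body consistent with these tests; it is not the actual trove/instance/service.py implementation and the argument handling is simplified:

# Sketch only: dispatch implied by the controller tests above.
def _modify_instance(self, context, req, instance, **kwargs):
    if kwargs.get('detach_replica'):
        instance.detach_replica()
    if 'configuration_id' in kwargs:
        if kwargs['configuration_id'] is not None:
            instance.attach_configuration(kwargs['configuration_id'])
        else:
            instance.detach_configuration()
    if kwargs:
        instance.update_db(**kwargs)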