Clusters API Implementation
Adds the clusters API, with MongoDB sharding as the first implementation. Co-Authored-By: Michael Yu <michayu@ebaysf.com> Co-Authored-By: Mat Lowery <mlowery@ebaysf.com> Co-Authored-By: rumale <rumale@ebaysf.com> Co-Authored-By: Timothy He <the@ebaysf.com> Partially implements: blueprint clustering Change-Id: Icab6fc3baab72e97f3231eaa4476b56b8dafb2a3
This commit is contained in:
parent
11b8a0b035
commit
bc64756995
|
@ -209,5 +209,8 @@ volume_support = True
|
|||
device_path = /dev/vdb
|
||||
|
||||
[mongodb]
|
||||
tcp_ports = 2500, 27017
|
||||
volume_support = True
|
||||
device_path = /dev/vdb
|
||||
num_config_servers_per_cluster = 1
|
||||
num_query_routers_per_cluster = 1
|
||||
|
|
|
@ -80,6 +80,8 @@ class Backup(object):
|
|||
cls.validate_can_perform_action(
|
||||
instance_model, 'backup_create')
|
||||
cls.verify_swift_auth_token(context)
|
||||
if instance_model.cluster_id is not None:
|
||||
raise exception.ClusterInstanceOperationNotSupported()
|
||||
|
||||
ds = instance_model.datastore
|
||||
ds_version = instance_model.datastore_version
|
||||
|
|
|
@ -0,0 +1,236 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from trove.cluster.tasks import ClusterTask
|
||||
from trove.cluster.tasks import ClusterTasks
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
from trove.common import strategy
|
||||
from trove.datastore import models as datastore_models
|
||||
from trove.db import models as dbmodels
|
||||
from trove.instance import models as inst_models
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
from trove.taskmanager import api as task_api
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def persisted_models():
    """Map persistence keys to DB model classes for trove's model registry."""
    return {
        'clusters': DBCluster,
    }
|
||||
|
||||
|
||||
class DBCluster(dbmodels.DatabaseModelBase):
    """Database record for a cluster row."""

    # Columns serialized to/from the clusters table.
    _data_fields = ['id', 'created', 'updated', 'name', 'task_id',
                    'tenant_id', 'datastore_version_id', 'deleted',
                    'deleted_at']

    def __init__(self, task_status, **kwargs):
        """
        Creates a new persistable entity of the cluster.

        :param task_status: the current task of the cluster.
        :type task_status: trove.cluster.tasks.ClusterTask
        """
        # Persist the task as its integer code; new rows start undeleted.
        kwargs["task_id"] = task_status.code
        kwargs["deleted"] = False
        super(DBCluster, self).__init__(**kwargs)
        self.task_status = task_status

    def _validate(self, errors):
        # Called by the base model; collects field errors into 'errors'.
        if ClusterTask.from_code(self.task_id) is None:
            errors['task_id'] = "Not valid."
        if self.task_status is None:
            errors['task_status'] = "Cannot be None."

    @property
    def task_status(self):
        """Rehydrate the ClusterTask object from the stored integer code."""
        return ClusterTask.from_code(self.task_id)

    @task_status.setter
    def task_status(self, task_status):
        # Only the code is persisted; the object is reconstructed on read.
        self.task_id = task_status.code
|
||||
|
||||
|
||||
class Cluster(object):
    """API-facing model wrapping a DBCluster row plus its datastore info.

    Datastore-specific subclasses (selected through the API strategy)
    implement create/grow behavior; this base class provides loading,
    persistence helpers, and read-only accessors.
    """

    DEFAULT_LIMIT = CONF.clusters_page_size

    def __init__(self, context, db_info, datastore=None,
                 datastore_version=None):
        """Wrap 'db_info'; lazily resolve datastore/version if not given."""
        self.context = context
        self.db_info = db_info
        self.ds = datastore
        self.ds_version = datastore_version
        if self.ds_version is None:
            self.ds_version = (datastore_models.DatastoreVersion.
                               load_by_uuid(self.db_info.datastore_version_id))
        if self.ds is None:
            self.ds = (datastore_models.Datastore.
                       load(self.ds_version.datastore_id))

    @classmethod
    def load_all(cls, context, tenant_id):
        """Return (clusters, next_marker) for a tenant, paginated."""
        db_infos = DBCluster.find_all(tenant_id=tenant_id,
                                      deleted=False)
        # Clamp any caller-supplied page size to the configured maximum.
        limit = int(context.limit or Cluster.DEFAULT_LIMIT)
        if limit > Cluster.DEFAULT_LIMIT:
            limit = Cluster.DEFAULT_LIMIT
        # NOTE(review): "foo" is the order_by argument — looks like a
        # placeholder; confirm against find_by_pagination's signature.
        data_view = DBCluster.find_by_pagination('clusters', db_infos, "foo",
                                                 limit=limit,
                                                 marker=context.marker)
        next_marker = data_view.next_page_marker
        ret = [cls(context, db_info) for db_info in data_view.collection]
        return ret, next_marker

    @classmethod
    def load(cls, context, cluster_id, clazz=None):
        """Load one cluster, dispatching to the datastore-specific class.

        :raises exception.ClusterNotFound: if no undeleted row matches.
        """
        try:
            db_info = DBCluster.find_by(context=context, id=cluster_id,
                                        deleted=False)
        except exception.ModelNotFoundError:
            raise exception.ClusterNotFound(cluster=cluster_id)
        if not clazz:
            # Pick the concrete Cluster subclass from the API strategy
            # registered for this datastore manager.
            ds_version = (datastore_models.DatastoreVersion.
                          load_by_uuid(db_info.datastore_version_id))
            manager = ds_version.manager
            clazz = strategy.load_api_strategy(manager).cluster_class
        return clazz(context, db_info)

    def update_db(self, **values):
        """Re-read the row, apply 'values', and save."""
        self.db_info = DBCluster.find_by(id=self.id, deleted=False)
        for key in values:
            setattr(self.db_info, key, values[key])
        self.db_info.save()

    def reset_task(self):
        """Clear the cluster's task back to NONE."""
        LOG.info(_("Setting task to NONE on cluster %s") % self.id)
        self.update_db(task_status=ClusterTasks.NONE)

    @property
    def id(self):
        return self.db_info.id

    @property
    def created(self):
        return self.db_info.created

    @property
    def updated(self):
        return self.db_info.updated

    @property
    def name(self):
        return self.db_info.name

    @property
    def task_id(self):
        return self.db_info.task_status.code

    @property
    def task_name(self):
        return self.db_info.task_status.name

    @property
    def task_description(self):
        return self.db_info.task_status.description

    @property
    def tenant_id(self):
        return self.db_info.tenant_id

    @property
    def datastore(self):
        return self.ds

    @property
    def datastore_version(self):
        return self.ds_version

    @property
    def deleted(self):
        return self.db_info.deleted

    @property
    def deleted_at(self):
        return self.db_info.deleted_at

    @property
    def instances(self):
        """All member instances, with their nova servers loaded."""
        return inst_models.Instances.load_all_by_cluster_id(self.context,
                                                            self.db_info.id)

    @property
    def instances_without_server(self):
        """All member instances, skipping the nova server lookup."""
        return inst_models.Instances.load_all_by_cluster_id(
            self.context, self.db_info.id, load_servers=False)

    @classmethod
    def create(cls, context, name, datastore, datastore_version, instances):
        """Delegate creation to the datastore-specific cluster class."""
        api_strategy = strategy.load_api_strategy(datastore_version.manager)
        return api_strategy.cluster_class.create(context, name, datastore,
                                                 datastore_version, instances)

    def delete(self):
        """Delete the cluster: mark DELETING, delete members, notify taskman.

        :raises exception.UnprocessableEntity: if another task is running.
        """
        # Only an idle (or already-deleting) cluster may be deleted.
        if self.db_info.task_status not in (ClusterTasks.NONE,
                                            ClusterTasks.DELETING):
            current_task = self.db_info.task_status.name
            msg = _("This action cannot be performed on the cluster while "
                    "the current cluster task is '%s'.") % current_task
            LOG.error(msg)
            raise exception.UnprocessableEntity(msg)

        # Snapshot the member list before flipping the task, so the loop
        # below operates on a stable set.
        db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
                                                   deleted=False).all()

        self.update_db(task_status=ClusterTasks.DELETING)

        for db_inst in db_insts:
            instance = inst_models.load_any_instance(self.context, db_inst.id)
            instance.delete()

        # The task manager finishes the cluster-level teardown.
        task_api.API(self.context).delete_cluster(self.id)

    @staticmethod
    def load_instance(context, cluster_id, instance_id):
        """Load one member instance (with guest info) of a cluster."""
        return inst_models.load_instance_with_guest(
            inst_models.DetailInstance, context, instance_id, cluster_id)

    @staticmethod
    def manager_from_cluster_id(context, cluster_id):
        """Return the datastore manager name for an undeleted cluster."""
        db_info = DBCluster.find_by(context=context, id=cluster_id,
                                    deleted=False)
        ds_version = (datastore_models.DatastoreVersion.
                      load_by_uuid(db_info.datastore_version_id))
        return ds_version.manager
|
||||
|
||||
|
||||
def is_cluster_deleting(context, cluster_id):
    """Return True when the cluster's current task is DELETING."""
    loaded = Cluster.load(context, cluster_id)
    return ClusterTasks.DELETING == loaded.db_info.task_status
|
||||
|
||||
|
||||
def validate_volume_size(size):
    """Reject a missing volume size or one above the configured maximum."""
    if size is None:
        raise exception.VolumeSizeNotSpecified()
    max_size = CONF.max_accepted_volume_size
    # Guard clause: anything within the quota is acceptable as-is.
    if int(size) <= max_size:
        return
    msg = ("Volume 'size' cannot exceed maximum "
           "of %d Gb, %s cannot be accepted."
           % (max_size, size))
    raise exception.VolumeQuotaExceeded(msg)
|
|
@ -0,0 +1,165 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config.cfg import NoSuchOptError
|
||||
|
||||
from trove.cluster import models
|
||||
from trove.cluster import views
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
from trove.common import pagination
|
||||
from trove.common import apischema
|
||||
from trove.common import strategy
|
||||
from trove.common import utils
|
||||
from trove.common import wsgi
|
||||
from trove.datastore import models as datastore_models
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ClusterController(wsgi.Controller):

    """Controller for cluster functionality."""

    # JSON schemas used by the wsgi layer to validate request bodies.
    schemas = apischema.cluster.copy()

    @classmethod
    def get_action_schema(cls, body, action_schema):
        """Select the sub-schema matching the single action key in 'body'."""
        # NOTE(review): dict.keys()[0] is Python 2 only — keys() is a view
        # on Python 3; confirm target interpreter.
        action_type = body.keys()[0]
        return action_schema.get(action_type, {})

    @classmethod
    def get_schema(cls, action, body):
        """Return the schema for 'action', narrowing for cluster actions."""
        action_schema = super(ClusterController, cls).get_schema(action, body)
        if action == 'action':
            action_schema = cls.get_action_schema(body, action_schema)
        return action_schema

    def action(self, req, body, tenant_id, id):
        """Dispatch a cluster action (e.g. add_shard) via the API strategy.

        :raises exception.BadRequest: when the body is empty.
        :raises exception.TroveError: when no strategy action matches.
        """
        LOG.debug("Committing Action Against Cluster for "
                  "Tenant '%s'" % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("id : '%s'\n\n") % id)
        if not body:
            raise exception.BadRequest(_("Invalid request body."))
        context = req.environ[wsgi.CONTEXT_KEY]
        cluster = models.Cluster.load(context, id)
        manager = cluster.datastore_version.manager
        api_strategy = strategy.load_api_strategy(manager)
        _actions = api_strategy.cluster_controller_actions
        selected_action = None
        # for/else: the else branch runs only if no body key matched a
        # strategy-provided action.
        for key in body:
            if key in _actions:
                selected_action = _actions[key]
                break
        else:
            message = _("No action '%(action)s' supplied "
                        "by strategy for manager '%(manager)s'") % (
                            {'action': key, 'manager': manager})
            raise exception.TroveError(message)
        return selected_action(cluster, body)

    def show(self, req, tenant_id, id):
        """Return a single cluster."""
        LOG.debug("Showing a Cluster for Tenant '%s'" % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("id : '%s'\n\n") % id)

        context = req.environ[wsgi.CONTEXT_KEY]
        cluster = models.Cluster.load(context, id)
        return wsgi.Result(views.load_view(cluster, req=req).data(), 200)

    def show_instance(self, req, tenant_id, cluster_id, instance_id):
        """Return a single instance belonging to a cluster."""
        LOG.debug("Showing an Instance in a Cluster for Tenant '%s'"
                  % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("cluster_id : '%s'\n\n") % cluster_id)
        LOG.info(_("instance_id : '%s'\n\n") % instance_id)

        context = req.environ[wsgi.CONTEXT_KEY]
        # Loading the cluster first also validates it exists for this tenant.
        cluster = models.Cluster.load(context, cluster_id)
        instance = models.Cluster.load_instance(context, cluster.id,
                                                instance_id)
        return wsgi.Result(views.ClusterInstanceDetailView(
            instance, req=req).data(), 200)

    def delete(self, req, tenant_id, id):
        """Delete a cluster."""
        LOG.debug("Deleting a Cluster for Tenant '%s'" % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("id : '%s'\n\n") % id)

        context = req.environ[wsgi.CONTEXT_KEY]
        cluster = models.Cluster.load(context, id)
        cluster.delete()
        # 202: deletion continues asynchronously in the task manager.
        return wsgi.Result(None, 202)

    def index(self, req, tenant_id):
        """Return a list of clusters."""
        LOG.debug("Showing a list of clusters for Tenant '%s'" % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)

        context = req.environ[wsgi.CONTEXT_KEY]
        # Non-admins may only list clusters in their own tenant.
        if not context.is_admin and context.tenant != tenant_id:
            raise exception.TroveOperationAuthError(tenant_id=context.tenant)

        # load all clusters and instances for the tenant
        clusters, marker = models.Cluster.load_all(context, tenant_id)
        view = views.ClustersView(clusters, req=req)
        paged = pagination.SimplePaginatedDataView(req.url, 'clusters', view,
                                                   marker)
        return wsgi.Result(paged.data(), 200)

    def create(self, req, body, tenant_id):
        """Create a new cluster from the validated request body.

        :raises exception.ClusterDatastoreNotSupported: when the datastore
            has no cluster_support enabled in its configuration group.
        """
        LOG.debug("Creating a Cluster for Tenant '%s'" % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("body : '%s'\n\n") % body)

        context = req.environ[wsgi.CONTEXT_KEY]
        name = body['cluster']['name']
        datastore_args = body['cluster'].get('datastore', {})
        datastore, datastore_version = (
            datastore_models.get_datastore_version(**datastore_args))

        # A datastore without a 'cluster_support' option simply does not
        # support clustering.
        try:
            clusters_enabled = (CONF.get(datastore_version.manager)
                                .get('cluster_support'))
        except NoSuchOptError:
            clusters_enabled = False

        if not clusters_enabled:
            raise exception.ClusterDatastoreNotSupported(
                datastore=datastore.name,
                datastore_version=datastore_version.name)

        # Normalize each requested node into {flavor_id, volume_size}.
        nodes = body['cluster']['instances']
        instances = []
        for node in nodes:
            flavor_id = utils.get_id_from_href(node['flavorRef'])
            if 'volume' in node:
                volume_size = int(node['volume']['size'])
            else:
                volume_size = None
            instances.append({"flavor_id": flavor_id,
                              "volume_size": volume_size})

        cluster = models.Cluster.create(context, name, datastore,
                                        datastore_version, instances)
        view = views.load_view(cluster, req=req)
        return wsgi.Result(view.data(), 200)
|
|
@ -0,0 +1,70 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class ClusterTask(object):
    """
    Stores the different kind of tasks being performed by a cluster.

    Every instance registers itself in the class-level _lookup table,
    keyed by its integer code, so a task persisted as an int can be
    rehydrated via from_code().
    """
    _lookup = {}  # code (int) -> ClusterTask instance

    def __init__(self, code, name, description):
        self._code = int(code)
        self._name = name
        self._description = description
        ClusterTask._lookup[self._code] = self

    @property
    def code(self):
        """Integer code persisted in the database for this task."""
        return self._code

    @property
    def name(self):
        """Short symbolic name of the task."""
        return self._name

    @property
    def description(self):
        """Human-readable description of the task."""
        return self._description

    def __eq__(self, other):
        if not isinstance(other, ClusterTask):
            return False
        return self._code == other._code

    def __ne__(self, other):
        # Explicit inverse of __eq__: without it, Python 2 falls back to
        # identity comparison for '!=' even though '==' compares codes.
        return not self.__eq__(other)

    def __hash__(self):
        # Defining __eq__ suppresses the inherited __hash__ (instances
        # become unhashable on Python 3); hash on the equality key.
        return self._code

    @classmethod
    def from_code(cls, code):
        """Return the registered task for 'code', or None if unknown."""
        if code not in cls._lookup:
            return None
        return cls._lookup[code]

    def __str__(self):
        return "(%d %s %s)" % (self._code, self._name,
                               self._description)

    def __repr__(self):
        return "ClusterTask.%s (%s)" % (self._name,
                                        self._description)
|
||||
|
||||
|
||||
class ClusterTasks(object):
    """Enumeration of the well-known cluster tasks.

    The hex codes are the values persisted in the clusters table.
    """
    NONE = ClusterTask(0x01, 'NONE', 'No tasks for the cluster.')
    BUILDING_INITIAL = ClusterTask(
        0x02, 'BUILDING', 'Building the initial cluster.')
    DELETING = ClusterTask(0x03, 'DELETING', 'Deleting the cluster.')
    ADDING_SHARD = ClusterTask(
        0x04, 'ADDING_SHARD', 'Adding a shard to the cluster.')


# Dissuade further additions at run-time.
ClusterTask.__init__ = None
|
|
@ -0,0 +1,92 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from trove.common import strategy
|
||||
from trove.common.views import create_links
|
||||
from trove.instance.views import InstanceDetailView
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ClusterView(object):
    """Base serializer for a single cluster.

    Subclasses (per datastore) must implement build_instances() to
    produce the instance list and the cluster's external IP list.
    """

    def __init__(self, cluster, req=None, load_servers=True):
        self.cluster = cluster
        self.req = req
        # When False, instance data is built without nova server lookups.
        self.load_servers = load_servers

    def data(self):
        """Return the {'cluster': {...}} payload for this cluster."""
        instances, ip_list = self.build_instances()
        cluster_dict = {
            "id": self.cluster.id,
            "name": self.cluster.name,
            "task": {"id": self.cluster.task_id,
                     "name": self.cluster.task_name,
                     "description": self.cluster.task_description},
            "created": self.cluster.created,
            "updated": self.cluster.updated,
            "links": self._build_links(),
            "datastore": {"type": self.cluster.datastore.name,
                          "version": self.cluster.datastore_version.name},
            "instances": instances
        }
        # "ip" is only present when the datastore view supplies addresses.
        if ip_list:
            cluster_dict["ip"] = ip_list
        LOG.debug(cluster_dict)
        return {"cluster": cluster_dict}

    def _build_links(self):
        """Build self/bookmark links for the cluster resource."""
        return create_links("clusters", self.req, self.cluster.id)

    def build_instances(self):
        """Return (instances, ip_list); implemented per datastore."""
        raise NotImplementedError()

    def _build_flavor_info(self, flavor_id):
        """Return the flavor reference dict used inside instance entries."""
        return {
            "id": flavor_id,
            "links": create_links("flavors", self.req, flavor_id)
        }
|
||||
|
||||
|
||||
class ClusterInstanceDetailView(InstanceDetailView):
    """Detail view for a single instance that belongs to a cluster."""

    def __init__(self, instance, req):
        super(ClusterInstanceDetailView, self).__init__(instance, req=req)

    def data(self):
        # Currently identical to the base instance detail payload.
        return super(ClusterInstanceDetailView, self).data()
|
||||
|
||||
|
||||
class ClustersView(object):
    """Serializer for a collection of clusters."""

    def __init__(self, clusters, req=None):
        self.clusters = clusters
        self.req = req

    def data(self):
        """Return the {'clusters': [...]} payload."""
        rendered = [self.data_for_cluster(c) for c in self.clusters]
        return {'clusters': rendered}

    def data_for_cluster(self, cluster):
        # List entries skip server loading; only summary data is needed.
        view = load_view(cluster, req=self.req, load_servers=False)
        return view.data()['cluster']
|
||||
|
||||
|
||||
def load_view(cluster, req, load_servers=True):
    """Return the datastore-specific view object for 'cluster'.

    The view class comes from the API strategy registered for the
    cluster's datastore manager.
    """
    manager = cluster.datastore_version.manager
    return strategy.load_api_strategy(manager).cluster_view_class(
        cluster, req, load_servers)
|
|
@ -15,6 +15,7 @@
|
|||
import routes
|
||||
|
||||
from trove.common import wsgi
|
||||
from trove.cluster.service import ClusterController
|
||||
from trove.configuration.service import ConfigurationsController
|
||||
from trove.configuration.service import ParametersController
|
||||
from trove.flavor.service import FlavorController
|
||||
|
@ -31,6 +32,7 @@ class API(wsgi.Router):
|
|||
mapper = routes.Mapper()
|
||||
super(API, self).__init__(mapper)
|
||||
self._instance_router(mapper)
|
||||
self._cluster_router(mapper)
|
||||
self._datastore_router(mapper)
|
||||
self._flavor_router(mapper)
|
||||
self._versions_router(mapper)
|
||||
|
@ -94,6 +96,34 @@ class API(wsgi.Router):
|
|||
action="configuration",
|
||||
conditions={'method': ['GET']})
|
||||
|
||||
    def _cluster_router(self, mapper):
        """Wire the /clusters REST routes to the ClusterController."""
        cluster_resource = ClusterController().create_resource()
        # GET /clusters -> list clusters for the tenant.
        mapper.connect("/{tenant_id}/clusters",
                       controller=cluster_resource,
                       action="index",
                       conditions={'method': ['GET']})
        # GET /clusters/{id} -> show one cluster.
        mapper.connect("/{tenant_id}/clusters/{id}",
                       controller=cluster_resource,
                       action="show",
                       conditions={'method': ['GET']})
        # POST /clusters -> create a cluster.
        mapper.connect("/{tenant_id}/clusters",
                       controller=cluster_resource,
                       action="create",
                       conditions={'method': ['POST']})
        # POST /clusters/{id} -> perform a strategy action (e.g. add_shard).
        mapper.connect("/{tenant_id}/clusters/{id}",
                       controller=cluster_resource,
                       action="action",
                       conditions={'method': ['POST']})
        # GET /clusters/{cluster_id}/instances/{instance_id} -> show member.
        mapper.connect("/{tenant_id}/clusters/{cluster_id}/instances/"
                       "{instance_id}",
                       controller=cluster_resource,
                       action="show_instance",
                       conditions={'method': ['GET']})
        # DELETE /clusters/{id} -> delete a cluster (async, 202).
        mapper.connect("/{tenant_id}/clusters/{id}",
                       controller=cluster_resource,
                       action="delete",
                       conditions={'method': ['DELETE']})
|
||||
|
||||
def _flavor_router(self, mapper):
|
||||
flavor_resource = FlavorController().create_resource()
|
||||
mapper.connect("/{tenant_id}/flavors",
|
||||
|
|
|
@ -180,6 +180,55 @@ configuration_id = {
|
|||
]
|
||||
}
|
||||
|
||||
# JSON schemas validating cluster API request bodies: "create" for
# POST /clusters, "add_shard" for the cluster action endpoint.
cluster = {
    "create": {
        "type": "object",
        "required": ["cluster"],
        "additionalProperties": True,
        "properties": {
            "cluster": {
                "type": "object",
                "required": ["name", "datastore", "instances"],
                "additionalProperties": True,
                "properties": {
                    "name": non_empty_string,
                    "datastore": {
                        "type": "object",
                        "required": ["type", "version"],
                        "additionalProperties": True,
                        "properties": {
                            "type": non_empty_string,
                            "version": non_empty_string
                        }
                    },
                    "instances": {
                        "type": "array",
                        "items": {
                            "type": "object",
                            "required": ["flavorRef"],
                            "additionalProperties": True,
                            "properties": {
                                "flavorRef": flavorref,
                                "volume": volume
                            }
                        }
                    }
                }
            }
        }
    },
    "add_shard": {
        "type": "object",
        "required": ["add_shard"],
        "additionalProperties": True,
        "properties": {
            "add_shard": {
                "type": "object"
            }
        }
    }
}
|
||||
|
||||
instance = {
|
||||
"create": {
|
||||
"type": "object",
|
||||
|
@ -265,6 +314,21 @@ instance = {
|
|||
}
|
||||
}
|
||||
|
||||
# Schema for management-API cluster actions (currently only reset-task).
mgmt_cluster = {
    "action": {
        'reset-task': {
            "type": "object",
            "required": ["reset-task"],
            "additionalProperties": True,
            "properties": {
                "reset-task": {
                    "type": "object"
                }
            }
        }
    }
}
|
||||
|
||||
mgmt_instance = {
|
||||
"action": {
|
||||
'migrate': {
|
||||
|
|
|
@ -92,6 +92,7 @@ common_opts = [
|
|||
cfg.IntOpt('users_page_size', default=20),
|
||||
cfg.IntOpt('databases_page_size', default=20),
|
||||
cfg.IntOpt('instances_page_size', default=20),
|
||||
cfg.IntOpt('clusters_page_size', default=20),
|
||||
cfg.IntOpt('backups_page_size', default=20),
|
||||
cfg.IntOpt('configurations_page_size', default=20),
|
||||
cfg.ListOpt('ignore_users', default=['os_admin', 'root']),
|
||||
|
@ -99,7 +100,7 @@ common_opts = [
|
|||
'mysql',
|
||||
'information_schema']),
|
||||
cfg.IntOpt('agent_call_low_timeout', default=5),
|
||||
cfg.IntOpt('agent_call_high_timeout', default=60),
|
||||
cfg.IntOpt('agent_call_high_timeout', default=1000),
|
||||
cfg.StrOpt('guest_id', default=None),
|
||||
cfg.IntOpt('state_change_wait_time', default=3 * 60),
|
||||
cfg.IntOpt('agent_heartbeat_time', default=10),
|
||||
|
@ -148,6 +149,7 @@ common_opts = [
|
|||
cfg.IntOpt('dns_time_out', default=60 * 2),
|
||||
cfg.IntOpt('resize_time_out', default=60 * 10),
|
||||
cfg.IntOpt('revert_time_out', default=60 * 10),
|
||||
cfg.IntOpt('cluster_delete_time_out', default=60 * 3),
|
||||
cfg.ListOpt('root_grant', default=['ALL']),
|
||||
cfg.BoolOpt('root_grant_option', default=True),
|
||||
cfg.IntOpt('default_password_length', default=36),
|
||||
|
@ -282,6 +284,8 @@ common_opts = [
|
|||
"(security groups, floating IPs, etc.)"),
|
||||
cfg.IntOpt('usage_timeout', default=600,
|
||||
help='Timeout to wait for a guest to become active.'),
|
||||
cfg.IntOpt('cluster_usage_timeout', default=675,
|
||||
help='Timeout to wait for a cluster to become active.'),
|
||||
]
|
||||
|
||||
# Datastore specific option groups
|
||||
|
@ -487,6 +491,27 @@ mongodb_opts = [
|
|||
default=True,
|
||||
help='Whether to provision a cinder volume for datadir.'),
|
||||
cfg.StrOpt('device_path', default='/dev/vdb'),
|
||||
cfg.IntOpt('num_config_servers_per_cluster', default=3,
|
||||
help='The number of config servers to create per cluster.'),
|
||||
cfg.IntOpt('num_query_routers_per_cluster', default=1,
|
||||
help='The number of query routers (mongos) to create '
|
||||
'per cluster.'),
|
||||
cfg.BoolOpt('cluster_support', default=True,
|
||||
help='Enable clusters to be created and managed.'),
|
||||
cfg.StrOpt('api_strategy',
|
||||
default='trove.common.strategies.mongodb.api.'
|
||||
'MongoDbAPIStrategy',
|
||||
help='Class that implements datastore-specific API logic.'),
|
||||
cfg.StrOpt('taskmanager_strategy',
|
||||
default='trove.common.strategies.mongodb.taskmanager.'
|
||||
'MongoDbTaskManagerStrategy',
|
||||
help='Class that implements datastore-specific task manager '
|
||||
'logic.'),
|
||||
cfg.StrOpt('guestagent_strategy',
|
||||
default='trove.common.strategies.mongodb.guestagent.'
|
||||
'MongoDbGuestAgentStrategy',
|
||||
help='Class that implements datastore-specific guest agent API '
|
||||
'logic.'),
|
||||
]
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
|
|
@ -463,3 +463,35 @@ class InsufficientSpaceForSlave(TroveError):
|
|||
message = _("The target instance has only %(slave_volume_size)sG free, "
|
||||
"but the replication snapshot contains %(dataset_size)sG "
|
||||
"of data.")
|
||||
|
||||
|
||||
class ClusterNotFound(NotFound):
    """Raised when no undeleted cluster matches the given id."""
    message = _("Cluster '%(cluster)s' cannot be found.")


class ClusterFlavorsNotEqual(TroveError):
    """Raised when cluster members are requested with differing flavors."""
    message = _("The flavor for each instance in a cluster must be equal.")


class ClusterVolumeSizesNotEqual(TroveError):
    """Raised when cluster members are requested with differing volumes."""
    message = _("The volume size for each instance in a cluster must be "
                "equal.")


class ClusterNumInstancesNotSupported(TroveError):
    """Raised when the requested member count is not supported."""
    message = _("The number of instances for your initial cluster must "
                "be %(num_instances)s.")


class ClusterInstanceOperationNotSupported(TroveError):
    """Raised for per-instance operations disallowed on cluster members."""
    message = _("Operation not supported for instances that are part of a "
                "cluster.")


class TroveOperationAuthError(TroveError):
    """Raised when a tenant attempts an operation it is not allowed."""
    message = _("Operation not allowed for tenant %(tenant_id)s.")


class ClusterDatastoreNotSupported(TroveError):
    """Raised when the datastore/version has no cluster support."""
    message = _("Clusters not supported for "
                "%(datastore)s-%(datastore_version)s.")
|
||||
|
|
|
@ -0,0 +1,51 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
class BaseAPIStrategy(object):
    """Abstract hook points a datastore exposes to the clusters API layer."""

    @property
    def cluster_class(self):
        """Cluster model class for this datastore; must be overridden."""
        raise NotImplementedError()

    @property
    def cluster_controller_actions(self):
        """Mapping of action name -> handler; must be overridden."""
        raise NotImplementedError()

    @property
    def cluster_view_class(self):
        """View class for cluster payloads; must be overridden."""
        raise NotImplementedError()

    @property
    def mgmt_cluster_view_class(self):
        """View class for management payloads; must be overridden."""
        raise NotImplementedError()
|
||||
|
||||
|
||||
class BaseTaskManagerStrategy(object):
    """Abstract hook points a datastore exposes to the task manager."""

    # NOTE: the original getters declared a spurious 'context' parameter;
    # property getters are invoked with only 'self', so accessing these
    # attributes raised TypeError instead of NotImplementedError.

    @property
    def task_manager_api_class(self):
        """Task manager API class for this datastore; must be overridden."""
        raise NotImplementedError()

    @property
    def task_manager_cluster_tasks_class(self):
        """Cluster-tasks class for this datastore; must be overridden."""
        raise NotImplementedError()
|
||||
|
||||
|
||||
class BaseGuestAgentStrategy(object):
    """Abstract hook point a datastore exposes for its guest agent client."""

    @property
    def guest_client_class(self):
        """Guest agent client class; must be overridden."""
        raise NotImplementedError()
|
|
@ -0,0 +1,289 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from novaclient import exceptions as nova_exceptions
|
||||
|
||||
from trove.cluster import models
|
||||
from trove.cluster.tasks import ClusterTasks
|
||||
from trove.cluster.views import ClusterView
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
from trove.common import remote
|
||||
from trove.common.strategies import base
|
||||
from trove.common import utils
|
||||
from trove.common.views import create_links
|
||||
from trove.common import wsgi
|
||||
from trove.datastore import models as datastore_models
|
||||
from trove.extensions.mgmt.clusters.views import MgmtClusterView
|
||||
from trove.instance import models as inst_models
|
||||
from trove.openstack.common.gettextutils import _
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.quota.quota import check_quotas
|
||||
from trove.taskmanager import api as task_api
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class MongoDbAPIStrategy(base.BaseAPIStrategy):
    """Wires the clusters API to the MongoDB-specific implementations."""

    @property
    def cluster_class(self):
        """Cluster model used for MongoDB clusters."""
        return MongoDbCluster

    @property
    def cluster_view_class(self):
        """User-facing view for MongoDB clusters."""
        return MongoDbClusterView

    @property
    def mgmt_cluster_view_class(self):
        """Admin (mgmt) view for MongoDB clusters."""
        return MongoDbMgmtClusterView

    @property
    def cluster_controller_actions(self):
        """Cluster actions supported by MongoDB: only 'add_shard'."""
        return {'add_shard': self._action_add_shard}

    def _action_add_shard(self, cluster, body):
        # Shard creation is asynchronous; 202 = request accepted.
        cluster.add_shard()
        return wsgi.Result(None, 202)
|
||||
|
||||
|
||||
class MongoDbCluster(models.Cluster):
    """Cluster model implementing MongoDB sharded-cluster provisioning."""

    @classmethod
    def create(cls, context, name, datastore, datastore_version, instances):
        """Provision a new sharded MongoDB cluster.

        Validates the request (exactly 3 members, one flavor, matching
        volume sizes), checks quotas, creates the cluster record plus DB
        records for one replica set ("rs1"), the configured number of
        config servers and query routers (mongos), then hands the build
        off to the task manager.

        :param instances: list of dicts, each with 'flavor_id' and
                          optionally 'volume_size'
        :return: a new MongoDbCluster wrapping the created DB record
        """

        # TODO(amcreynolds): consider moving into CONF and even supporting
        # TODO(amcreynolds): an array of values, e.g. [3, 5, 7]
        # TODO(amcreynolds): or introduce a min/max num_instances and set
        # TODO(amcreynolds): both to 3
        num_instances = len(instances)
        if num_instances != 3:
            raise exception.ClusterNumInstancesNotSupported(num_instances=3)

        # All replica-set members must share a single flavor.
        flavor_ids = [instance['flavor_id'] for instance in instances]
        if len(set(flavor_ids)) != 1:
            raise exception.ClusterFlavorsNotEqual()
        flavor_id = flavor_ids[0]
        nova_client = remote.create_nova_client(context)
        try:
            flavor = nova_client.flavors.get(flavor_id)
        except nova_exceptions.NotFound:
            raise exception.FlavorNotFound(uuid=flavor_id)
        mongo_conf = CONF.get(datastore_version.manager)
        num_configsvr = mongo_conf.num_config_servers_per_cluster
        num_mongos = mongo_conf.num_query_routers_per_cluster
        # Quota delta covers members + config servers + query routers.
        delta_instances = num_instances + num_configsvr + num_mongos
        deltas = {'instances': delta_instances}

        volume_sizes = [instance['volume_size'] for instance in instances
                        if instance.get('volume_size', None)]
        volume_size = None
        if mongo_conf.volume_support:
            if len(set(volume_sizes)) != 1:
                raise exception.ClusterVolumeSizesNotEqual()
            volume_size = volume_sizes[0]
            models.validate_volume_size(volume_size)
            # TODO(amcreynolds): for now, mongos+configsvr same flavor+disk
            deltas['volumes'] = volume_size * delta_instances
        else:
            # TODO(amcreynolds): is ephemeral possible for mongodb clusters?
            if len(volume_sizes) > 0:
                raise exception.VolumeNotSupported()
            ephemeral_support = mongo_conf.device_path
            if ephemeral_support and flavor.ephemeral == 0:
                raise exception.LocalStorageNotSpecified(flavor=flavor_id)

        check_quotas(context.tenant, deltas)

        db_info = models.DBCluster.create(
            name=name, tenant_id=context.tenant,
            datastore_version_id=datastore_version.id,
            task_status=ClusterTasks.BUILDING_INITIAL)

        replica_set_name = "rs1"

        # Members of the initial shard share one shard_id / replica set.
        member_config = {"id": db_info.id,
                         "shard_id": utils.generate_uuid(),
                         "instance_type": "member",
                         "replica_set_name": replica_set_name}
        for i in range(1, num_instances + 1):
            instance_name = "%s-%s-%s" % (name, replica_set_name, str(i))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=member_config)

        configsvr_config = {"id": db_info.id,
                            "instance_type": "config_server"}
        for i in range(1, num_configsvr + 1):
            instance_name = "%s-%s-%s" % (name, "configsvr", str(i))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=configsvr_config)

        mongos_config = {"id": db_info.id,
                         "instance_type": "query_router"}
        for i in range(1, num_mongos + 1):
            instance_name = "%s-%s-%s" % (name, "mongos", str(i))
            inst_models.Instance.create(context, instance_name,
                                        flavor_id,
                                        datastore_version.image_id,
                                        [], [], datastore,
                                        datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=mongos_config)

        # Asynchronous build; task manager drives the rest.
        task_api.load(context, datastore_version.manager).create_cluster(
            db_info.id)

        return MongoDbCluster(context, db_info, datastore, datastore_version)

    def add_shard(self):
        """Add a new shard (replica set) sized like the existing shards.

        Rejected while any other cluster task is in flight. The new
        shard mirrors the flavor/volume of an arbitrary existing shard.
        """

        if self.db_info.task_status != ClusterTasks.NONE:
            current_task = self.db_info.task_status.name
            msg = _("This action cannot be performed on the cluster while "
                    "the current cluster task is '%s'.") % current_task
            LOG.error(msg)
            raise exception.UnprocessableEntity(msg)

        db_insts = inst_models.DBInstance.find_all(cluster_id=self.id,
                                                   type='member').all()
        num_unique_shards = len(set([db_inst.shard_id for db_inst
                                     in db_insts]))
        # Size the new shard after one existing shard's membership.
        arbitrary_shard_id = db_insts[0].shard_id
        members_in_shard = [db_inst for db_inst in db_insts
                            if db_inst.shard_id == arbitrary_shard_id]
        num_members_per_shard = len(members_in_shard)
        a_member = inst_models.load_any_instance(self.context,
                                                 members_in_shard[0].id)
        deltas = {'instances': num_members_per_shard}
        volume_size = a_member.volume_size
        if volume_size:
            deltas['volumes'] = volume_size * num_members_per_shard
        check_quotas(self.context.tenant, deltas)
        # NOTE(review): assumes replica sets are named sequentially
        # ("rs1", "rs2", ...) so shard count + 1 is unused — TODO confirm.
        new_replica_set_name = "rs" + str(num_unique_shards + 1)
        new_shard_id = utils.generate_uuid()
        member_config = {"id": self.id,
                         "shard_id": new_shard_id,
                         "instance_type": "member",
                         "replica_set_name": new_replica_set_name}
        for i in range(1, num_members_per_shard + 1):
            instance_name = "%s-%s-%s" % (self.name, new_replica_set_name,
                                          str(i))
            inst_models.Instance.create(self.context, instance_name,
                                        a_member.flavor_id,
                                        a_member.datastore_version.image_id,
                                        [], [], a_member.datastore,
                                        a_member.datastore_version,
                                        volume_size, None,
                                        availability_zone=None,
                                        nics=None,
                                        configuration_id=None,
                                        cluster_config=member_config)

        self.update_db(task_status=ClusterTasks.ADDING_SHARD)
        manager = (datastore_models.DatastoreVersion.
                   load_by_uuid(db_insts[0].datastore_version_id).manager)
        task_api.load(self.context, manager).mongodb_add_shard_cluster(
            self.id,
            new_shard_id,
            new_replica_set_name)
|
||||
|
||||
|
||||
class MongoDbClusterView(ClusterView):
    """User-facing view of a MongoDB cluster: members only."""

    def build_instances(self):
        """Build the instance portion of the cluster view.

        Returns a tuple of (dicts for 'member' instances, sorted list of
        query-router IPs collected when servers are loaded).
        """
        members = []
        router_ips = []
        source = (self.cluster.instances if self.load_servers
                  else self.cluster.instances_without_server)
        for inst in source:
            # One visible IP per query router forms the cluster endpoint.
            if self.load_servers and inst.type == 'query_router':
                addresses = inst.get_visible_ip_addresses()
                if addresses:
                    router_ips.append(addresses[0])
            # Only replica-set members are exposed to the user.
            if inst.type != 'member':
                continue
            entry = {
                "id": inst.id,
                "name": inst.name,
                "links": create_links("instances", self.req, inst.id)
            }
            if inst.shard_id:
                entry["shard_id"] = inst.shard_id
            if self.load_servers:
                entry["status"] = inst.status
                if CONF.get(inst.datastore_version.manager).volume_support:
                    entry["volume"] = {"size": inst.volume_size}
                entry["flavor"] = self._build_flavor_info(inst.flavor_id)
            members.append(entry)
        router_ips.sort()
        return members, router_ips
|
||||
|
||||
|
||||
class MongoDbMgmtClusterView(MgmtClusterView):
    """Admin view of a MongoDB cluster: every instance type is listed."""

    def build_instances(self):
        """Return (dicts for all cluster instances, sorted router IPs)."""
        entries = []
        router_ips = []
        source = (self.cluster.instances if self.load_servers
                  else self.cluster.instances_without_server)
        for inst in source:
            entry = {
                "id": inst.id,
                "name": inst.name,
                "type": inst.type,
                "links": create_links("instances", self.req, inst.id)
            }
            addresses = inst.get_visible_ip_addresses()
            if self.load_servers and addresses:
                entry["ip"] = addresses
                # First visible router IP doubles as a cluster endpoint.
                if inst.type == 'query_router':
                    router_ips.append(addresses[0])
            if inst.shard_id:
                entry["shard_id"] = inst.shard_id
            if self.load_servers:
                entry["status"] = inst.status
                if CONF.get(inst.datastore_version.manager).volume_support:
                    entry["volume"] = {"size": inst.volume_size}
                entry["flavor"] = self._build_flavor_info(inst.flavor_id)
            entries.append(entry)
        router_ips.sort()
        return entries, router_ips
|
|
@ -0,0 +1,51 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo.config.cfg import NoSuchOptError
|
||||
|
||||
from trove.common import cfg
|
||||
from trove.common.utils import import_class
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def load_api_strategy(manager):
    """Instantiate the cluster API strategy configured for *manager*.

    Unlike the taskmanager/guestagent loaders below, the option is not
    optional here: a missing 'api_strategy' option propagates as an
    error instead of returning None.
    """
    class_path = CONF.get(manager).get('api_strategy')
    LOG.debug("Loading class %s" % class_path)
    return import_class(class_path)()
|
||||
|
||||
|
||||
def load_taskmanager_strategy(manager):
    """Instantiate the task-manager cluster strategy for *manager*.

    Returns None when the datastore defines no 'taskmanager_strategy'
    option.
    """
    try:
        class_path = CONF.get(manager).get('taskmanager_strategy')
        LOG.debug("Loading class %s" % class_path)
        return import_class(class_path)()
    except NoSuchOptError:
        # Datastore has no clustering support on the task-manager side.
        return None
|
||||
|
||||
|
||||
def load_guestagent_strategy(manager):
    """Instantiate the guest-agent cluster strategy for *manager*.

    Returns None when the datastore defines no 'guestagent_strategy'
    option.
    """
    try:
        class_path = CONF.get(manager).get('guestagent_strategy')
        LOG.debug("Loading class %s" % class_path)
        return import_class(class_path)()
    except NoSuchOptError:
        # Datastore has no clustering support on the guest-agent side.
        return None
|
|
@ -333,7 +333,8 @@ class Controller(object):
|
|||
exception.UserNotFound,
|
||||
exception.DatabaseNotFound,
|
||||
exception.QuotaResourceUnknown,
|
||||
exception.BackupFileNotFound
|
||||
exception.BackupFileNotFound,
|
||||
exception.ClusterNotFound
|
||||
],
|
||||
webob.exc.HTTPConflict: [
|
||||
exception.BackupNotCompleteError,
|
||||
|
@ -351,7 +352,9 @@ class Controller(object):
|
|||
webob.exc.HTTPNotImplemented: [
|
||||
exception.VolumeNotSupported,
|
||||
exception.LocalStorageNotSupported,
|
||||
exception.DatastoreOperationNotSupported
|
||||
exception.DatastoreOperationNotSupported,
|
||||
exception.ClusterInstanceOperationNotSupported,
|
||||
exception.ClusterDatastoreNotSupported
|
||||
],
|
||||
}
|
||||
|
||||
|
|
|
@ -63,6 +63,8 @@ def map(engine, models):
|
|||
Table('configuration_parameters', meta, autoload=True))
|
||||
orm.mapper(models['conductor_lastseen'],
|
||||
Table('conductor_lastseen', meta, autoload=True))
|
||||
orm.mapper(models['clusters'],
|
||||
Table('clusters', meta, autoload=True))
|
||||
|
||||
|
||||
def mapping_exists(model):
|
||||
|
|
|
@ -0,0 +1,77 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from sqlalchemy import ForeignKey
|
||||
from sqlalchemy.exc import OperationalError
|
||||
from sqlalchemy.schema import Column
|
||||
from sqlalchemy.schema import Index
|
||||
from sqlalchemy.schema import MetaData
|
||||
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import Boolean
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import Integer
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import String
|
||||
from trove.db.sqlalchemy.migrate_repo.schema import Table
|
||||
from trove.openstack.common import log as logging
|
||||
|
||||
# Logger shared by the migration-repo schema modules.
logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')

meta = MetaData()

# New top-level 'clusters' table; upgrade() also adds a nullable
# cluster_id FK (plus shard_id/type columns) to 'instances'.
clusters = Table(
    'clusters',
    meta,
    Column('id', String(36), primary_key=True, nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('name', String(255), nullable=False),
    Column('task_id', Integer(), nullable=False),
    Column('tenant_id', String(36), nullable=False),
    Column("datastore_version_id", String(36),
           ForeignKey('datastore_versions.id'), nullable=False),
    Column('deleted', Boolean()),
    Column('deleted_at', DateTime()),
    Index("clusters_tenant_id", "tenant_id"),
    Index("clusters_deleted", "deleted"),)
|
||||
|
||||
|
||||
def upgrade(migrate_engine):
    """Create the clusters table and add cluster columns to instances."""
    meta.bind = migrate_engine
    # Autoload referenced tables so the foreign keys resolve.
    Table('datastores', meta, autoload=True)
    Table('datastore_versions', meta, autoload=True)
    instances = Table('instances', meta, autoload=True)

    # since the downgrade is a no-op, an upgrade after a downgrade will
    # cause an exception because the tables already exist
    # we will catch that case and log an info message
    try:
        create_tables([clusters])

        instances.create_column(Column('cluster_id', String(36),
                                       ForeignKey("clusters.id")))
        instances.create_column(Column('shard_id', String(36)))
        instances.create_column(Column('type', String(64)))

        cluster_id_idx = Index("instances_cluster_id", instances.c.cluster_id)
        cluster_id_idx.create()
    except OperationalError as e:
        logger.info(e)
|
||||
|
||||
|
||||
def downgrade(migrate_engine):
    """Intentional no-op: the clusters schema is left in place."""
    meta.bind = migrate_engine
    # not dropping the table on a rollback because the cluster
    # assets will still exist
|
|
@ -49,6 +49,7 @@ def configure_db(options, models_mapper=None):
|
|||
from trove.extensions.security_group import models as secgrp_models
|
||||
from trove.configuration import models as configurations_models
|
||||
from trove.conductor import models as conductor_models
|
||||
from trove.cluster import models as cluster_models
|
||||
|
||||
model_modules = [
|
||||
base_models,
|
||||
|
@ -61,6 +62,7 @@ def configure_db(options, models_mapper=None):
|
|||
secgrp_models,
|
||||
configurations_models,
|
||||
conductor_models,
|
||||
cluster_models,
|
||||
]
|
||||
|
||||
models = {}
|
||||
|
|
|
@ -0,0 +1,47 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from trove.cluster import models as cluster_models
|
||||
from trove.instance import models as instance_models
|
||||
|
||||
|
||||
class MgmtCluster(cluster_models.Cluster):
    """Admin-facing cluster model; can also load soft-deleted clusters."""

    def __init__(self, context, db_info, datastore=None,
                 datastore_version=None):
        super(MgmtCluster, self).__init__(context, db_info, datastore,
                                          datastore_version)

    @classmethod
    def load(cls, context, id):
        """Load a single cluster by id."""
        record = cluster_models.DBCluster.find_by(id=id)
        return cls(context, record)

    @classmethod
    def load_all(cls, context, deleted=None):
        """List clusters, optionally filtered on the 'deleted' flag."""
        criteria = {} if deleted is None else {'deleted': deleted}
        records = cluster_models.DBCluster.find_all(**criteria)
        return [cls(context, record) for record in records]

    @property
    def instances(self):
        """All non-deleted instances belonging to this cluster."""
        records = instance_models.DBInstance.find_all(
            cluster_id=self.db_info.id, deleted=False)
        return [instance_models.load_any_instance(self.context, rec.id)
                for rec in records]
|
|
@ -0,0 +1,98 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
from trove.cluster.service import ClusterController
|
||||
from trove.common import exception
|
||||
from trove.common import wsgi
|
||||
from trove.common.auth import admin_context
|
||||
from trove.extensions.mgmt.clusters import models
|
||||
from trove.extensions.mgmt.clusters import views
|
||||
from trove.openstack.common import log as logging
|
||||
from trove.openstack.common.gettextutils import _
|
||||
import trove.common.apischema as apischema
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ClusterController(ClusterController):
    """Admin (mgmt) controller for cluster functionality.

    Extends the public ClusterController with listing of deleted
    clusters and an admin-only 'reset-task' action.
    """
    schemas = apischema.mgmt_cluster

    @classmethod
    def get_action_schema(cls, body, action_schema):
        """Return the sub-schema for the single action named in *body*."""
        # next(iter(...)) instead of body.keys()[0]: dict views are not
        # indexable on Python 3, so the old form raised TypeError there.
        action_type = next(iter(body))
        return action_schema.get(action_type, {})

    @admin_context
    def index(self, req, tenant_id):
        """Return a list of clusters."""
        LOG.debug("Showing a list of clusters for tenant '%s'." % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        context = req.environ[wsgi.CONTEXT_KEY]
        # 'deleted' query param is tri-state: True / False / no filter.
        deleted = None
        deleted_q = req.GET.get('deleted', '').lower()
        if deleted_q in ['true']:
            deleted = True
        elif deleted_q in ['false']:
            deleted = False
        clusters = models.MgmtCluster.load_all(context, deleted=deleted)
        view_cls = views.MgmtClustersView
        return wsgi.Result(view_cls(clusters, req=req).data(), 200)

    @admin_context
    def show(self, req, tenant_id, id):
        """Return a single cluster."""
        LOG.debug("Showing cluster for tenant '%s'." % tenant_id)
        LOG.info(_("req : '%s'\n\n") % req)
        LOG.info(_("id : '%s'\n\n") % id)
        context = req.environ[wsgi.CONTEXT_KEY]
        cluster = models.MgmtCluster.load(context, id)
        return wsgi.Result(
            views.load_mgmt_view(cluster, req=req).data(),
            200)

    @admin_context
    def action(self, req, body, tenant_id, id):
        """Dispatch exactly one admin action named in the request body."""
        LOG.debug("Committing an action against cluster %(cluster)s for "
                  "tenant '%(tenant)s'." % {'cluster': id,
                                            'tenant': tenant_id})
        LOG.info(_("req : '%s'\n\n") % req)
        if not body:
            raise exception.BadRequest(_("Invalid request body."))
        context = req.environ[wsgi.CONTEXT_KEY]
        cluster = models.MgmtCluster.load(context=context, id=id)
        _actions = {
            'reset-task': self._action_reset_task
        }
        selected_action = None
        for key in body:
            if key in _actions:
                # Reject bodies that name more than one known action.
                if selected_action is not None:
                    msg = _("Only one action can be specified per request.")
                    raise exception.BadRequest(msg)
                selected_action = _actions[key]
            else:
                msg = _("Invalid cluster action: %s.") % key
                raise exception.BadRequest(msg)

        if selected_action:
            return selected_action(context, cluster, body)
        else:
            raise exception.BadRequest(_("Invalid request body."))

    def _action_reset_task(self, context, cluster, body):
        # Admin escape hatch for a stuck cluster task; 202 = accepted.
        cluster.reset_task()
        return wsgi.Result(None, 202)
|
|
@ -0,0 +1,58 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from trove.cluster.views import ClusterView
|
||||
from trove.common import strategy
|
||||
|
||||
|
||||
class MgmtClusterView(ClusterView):
    """Base admin view: augments the public view with admin-only fields."""

    def __init__(self, cluster, req=None, load_servers=True):
        super(MgmtClusterView, self).__init__(cluster, req, load_servers)

    def data(self):
        """Return the public cluster data plus tenant/deletion fields."""
        payload = super(MgmtClusterView, self).data()
        cluster_data = payload['cluster']
        cluster_data['tenant_id'] = self.cluster.tenant_id
        cluster_data['deleted'] = bool(self.cluster.deleted)
        if self.cluster.deleted_at:
            cluster_data['deleted_at'] = self.cluster.deleted_at
        return payload

    def build_instances(self):
        # Datastore-specific subclasses supply the instance layout.
        raise NotImplementedError
|
||||
|
||||
|
||||
class MgmtClustersView(object):
    """Renders a list of MgmtCluster objects."""

    def __init__(self, clusters, req=None):
        self.clusters = clusters
        self.req = req

    def data(self):
        """Render every cluster without loading its servers."""
        return {'clusters': [self.data_for_cluster(cluster)
                             for cluster in self.clusters]}

    def data_for_cluster(self, cluster):
        """Render one cluster via its datastore-specific mgmt view."""
        view = load_mgmt_view(cluster, req=self.req, load_servers=False)
        return view.data()['cluster']
|
||||
|
||||
|
||||
def load_mgmt_view(cluster, req, load_servers=True):
    """Build the datastore-specific mgmt view for *cluster*."""
    manager = cluster.datastore_version.manager
    view_class = strategy.load_api_strategy(manager).mgmt_cluster_view_class
    return view_class(cluster, req, load_servers)
|
|
@ -27,7 +27,8 @@ LOG = logging.getLogger(__name__)
|
|||
CONF = cfg.CONF
|
||||
|
||||
|
||||
def load_mgmt_instances(context, deleted=None, client=None):
|
||||
def load_mgmt_instances(context, deleted=None, client=None,
|
||||
include_clustered=None):
|
||||
if not client:
|
||||
client = remote.create_nova_client(context)
|
||||
try:
|
||||
|
@ -36,10 +37,13 @@ def load_mgmt_instances(context, deleted=None, client=None):
|
|||
mgmt_servers = client.servers.list(search_opts={'all_tenants': 1})
|
||||
LOG.info("Found %d servers in Nova" %
|
||||
len(mgmt_servers if mgmt_servers else []))
|
||||
args = {}
|
||||
if deleted is not None:
|
||||
db_infos = instance_models.DBInstance.find_all(deleted=deleted)
|
||||
else:
|
||||
db_infos = instance_models.DBInstance.find_all()
|
||||
args['deleted'] = deleted
|
||||
if not include_clustered:
|
||||
args['cluster_id'] = None
|
||||
db_infos = instance_models.DBInstance.find_all(**args)
|
||||
|
||||
instances = MgmtInstances.load_status_from_existing(context, db_infos,
|
||||
mgmt_servers)
|
||||
return instances
|
||||
|
|
|
@ -56,8 +56,11 @@ class MgmtInstanceController(InstanceController):
|
|||
deleted = True
|
||||
elif deleted_q in ['false']:
|
||||
deleted = False
|
||||
clustered_q = req.GET.get('include_clustered', '').lower()
|
||||
include_clustered = clustered_q == 'true'
|
||||
try:
|
||||
instances = models.load_mgmt_instances(context, deleted=deleted)
|
||||
instances = models.load_mgmt_instances(
|
||||
context, deleted=deleted, include_clustered=include_clustered)
|
||||
except nova_exceptions.ClientException as e:
|
||||
LOG.error(e)
|
||||
return wsgi.Result(str(e), 403)
|
||||
|
|
|
@ -16,6 +16,7 @@
|
|||
from trove.openstack.common import log as logging
|
||||
|
||||
from trove.common import extensions
|
||||
from trove.extensions.mgmt.clusters.service import ClusterController
|
||||
from trove.extensions.mgmt.instances.service import MgmtInstanceController
|
||||
from trove.extensions.mgmt.host.service import HostController
|
||||
from trove.extensions.mgmt.quota.service import QuotaController
|
||||
|
@ -45,6 +46,7 @@ class Mgmt(extensions.ExtensionDescriptor):
|
|||
|
||||
def get_resources(self):
|
||||
resources = []
|
||||
|
||||
instances = extensions.ResourceExtension(
|
||||
'{tenant_id}/mgmt/instances',
|
||||
MgmtInstanceController(),
|
||||
|
@ -54,6 +56,12 @@ class Mgmt(extensions.ExtensionDescriptor):
|
|||
'action': 'POST'})
|
||||
resources.append(instances)
|
||||
|
||||
clusters = extensions.ResourceExtension(
|
||||
'{tenant_id}/mgmt/clusters',
|
||||
ClusterController(),
|
||||
member_actions={'action': 'POST'})
|
||||
resources.append(clusters)
|
||||
|
||||
hosts = extensions.ResourceExtension(
|
||||
'{tenant_id}/mgmt/hosts',
|
||||
HostController(),
|
||||
|
|
|
@ -217,6 +217,10 @@ class SimpleInstance(object):
|
|||
def id(self):
|
||||
return self.db_info.id
|
||||
|
||||
@property
|
||||
def type(self):
|
||||
return self.db_info.type
|
||||
|
||||
@property
|
||||
def tenant_id(self):
|
||||
return self.db_info.tenant_id
|
||||
|
@ -366,6 +370,14 @@ class SimpleInstance(object):
|
|||
deleted=False).all()
|
||||
return self.slave_list
|
||||
|
||||
@property
|
||||
def cluster_id(self):
|
||||
return self.db_info.cluster_id
|
||||
|
||||
@property
|
||||
def shard_id(self):
|
||||
return self.db_info.shard_id
|
||||
|
||||
|
||||
class DetailInstance(SimpleInstance):
|
||||
"""A detailed view of an Instance.
|
||||
|
@ -397,7 +409,7 @@ class DetailInstance(SimpleInstance):
|
|||
self._volume_total = value
|
||||
|
||||
|
||||
def get_db_info(context, id):
|
||||
def get_db_info(context, id, cluster_id=None):
|
||||
"""
|
||||
Retrieves an instance of the managed datastore from the persisted
|
||||
storage based on the ID and Context
|
||||
|
@ -405,6 +417,8 @@ def get_db_info(context, id):
|
|||
:type context: trove.common.context.TroveContext
|
||||
:param id: the unique ID of the instance
|
||||
:type id: unicode or str
|
||||
:param cluster_id: the unique ID of the cluster
|
||||
:type cluster_id: unicode or str
|
||||
:return: a record of the instance as its state exists in persisted storage
|
||||
:rtype: trove.instance.models.DBInstance
|
||||
"""
|
||||
|
@ -413,17 +427,22 @@ def get_db_info(context, id):
|
|||
elif id is None:
|
||||
raise TypeError("Argument id not defined.")
|
||||
try:
|
||||
db_info = DBInstance.find_by(context=context, id=id, deleted=False)
|
||||
if cluster_id is not None:
|
||||
db_info = DBInstance.find_by(context=context, id=id,
|
||||
cluster_id=cluster_id, deleted=False)
|
||||
else:
|
||||
db_info = DBInstance.find_by(context=context, id=id, deleted=False)
|
||||
except exception.NotFound:
|
||||
raise exception.NotFound(uuid=id)
|
||||
return db_info
|
||||
|
||||
|
||||
def load_any_instance(context, id):
|
||||
def load_any_instance(context, id, load_server=True):
|
||||
# Try to load an instance with a server.
|
||||
# If that fails, try to load it without the server.
|
||||
try:
|
||||
return load_instance(BuiltInstance, context, id, needs_server=True)
|
||||
return load_instance(BuiltInstance, context, id,
|
||||
needs_server=load_server)
|
||||
except exception.UnprocessableEntity:
|
||||
LOG.warn(_("Could not load instance %s.") % id)
|
||||
return load_instance(FreshInstance, context, id, needs_server=False)
|
||||
|
@ -456,8 +475,8 @@ def load_instance(cls, context, id, needs_server=False):
|
|||
return cls(context, db_info, server, service_status)
|
||||
|
||||
|
||||
def load_instance_with_guest(cls, context, id):
|
||||
db_info = get_db_info(context, id)
|
||||
def load_instance_with_guest(cls, context, id, cluster_id=None):
|
||||
db_info = get_db_info(context, id, cluster_id)
|
||||
load_simple_instance_server_status(context, db_info)
|
||||
service_status = InstanceServiceStatus.find_by(instance_id=id)
|
||||
LOG.debug("Instance %(instance_id)s service status is %(service_status)s."
|
||||
|
@ -529,6 +548,12 @@ class BaseInstance(SimpleInstance):
|
|||
% self.id)
|
||||
LOG.debug("Deleting instance with compute id = %s." %
|
||||
self.db_info.compute_instance_id)
|
||||
|
||||
from trove.cluster.models import is_cluster_deleting
|
||||
if (self.db_info.cluster_id is not None and not
|
||||
is_cluster_deleting(self.context, self.db_info.cluster_id)):
|
||||
raise exception.ClusterInstanceOperationNotSupported()
|
||||
|
||||
self.update_db(task_status=InstanceTasks.DELETING,
|
||||
configuration_id=None)
|
||||
task_api.API(self.context).delete_instance(self.id)
|
||||
|
@ -631,7 +656,7 @@ class Instance(BuiltInstance):
|
|||
def create(cls, context, name, flavor_id, image_id, databases, users,
|
||||
datastore, datastore_version, volume_size, backup_id,
|
||||
availability_zone=None, nics=None, configuration_id=None,
|
||||
slave_of_id=None):
|
||||
slave_of_id=None, cluster_config=None):
|
||||
|
||||
datastore_cfg = CONF.get(datastore_version.manager)
|
||||
client = create_nova_client(context)
|
||||
|
@ -684,6 +709,13 @@ class Instance(BuiltInstance):
|
|||
|
||||
def _create_resources():
|
||||
|
||||
if cluster_config:
|
||||
cluster_id = cluster_config.get("id", None)
|
||||
shard_id = cluster_config.get("shard_id", None)
|
||||
instance_type = cluster_config.get("instance_type", None)
|
||||
else:
|
||||
cluster_id = shard_id = instance_type = None
|
||||
|
||||
db_info = DBInstance.create(name=name, flavor_id=flavor_id,
|
||||
tenant_id=context.tenant,
|
||||
volume_size=volume_size,
|
||||
|
@ -691,7 +723,10 @@ class Instance(BuiltInstance):
|
|||
datastore_version.id,
|
||||
task_status=InstanceTasks.BUILDING,
|
||||
configuration_id=configuration_id,
|
||||
slave_of_id=slave_of_id)
|
||||
slave_of_id=slave_of_id,
|
||||
cluster_id=cluster_id,
|
||||
shard_id=shard_id,
|
||||
type=instance_type)
|
||||
LOG.debug("Tenant %(tenant)s created new Trove instance %(db)s."
|
||||
% {'tenant': context.tenant, 'db': db_info.id})
|
||||
|
||||
|
@ -722,10 +757,9 @@ class Instance(BuiltInstance):
|
|||
datastore_version.packages,
|
||||
volume_size, backup_id,
|
||||
availability_zone,
|
||||
root_password,
|
||||
nics,
|
||||
overrides,
|
||||
slave_of_id)
|
||||
root_password, nics,
|
||||
overrides, slave_of_id,
|
||||
cluster_config)
|
||||
|
||||
return SimpleInstance(context, db_info, datastore_status,
|
||||
root_password)
|
||||
|
@ -752,6 +786,8 @@ class Instance(BuiltInstance):
|
|||
LOG.info(_("Resizing instance %(instance_id)s flavor to "
|
||||
"%(flavor_id)s.")
|
||||
% {'instance_id': self.id, 'flavor_id': new_flavor_id})
|
||||
if self.db_info.cluster_id is not None:
|
||||
raise exception.ClusterInstanceOperationNotSupported()
|
||||
# Validate that the flavor can be found and that it isn't the same size
|
||||
# as the current one.
|
||||
client = create_nova_client(self.context)
|
||||
|
@ -788,6 +824,8 @@ class Instance(BuiltInstance):
|
|||
def _resize_resources():
|
||||
self.validate_can_perform_action()
|
||||
LOG.info(_("Resizing volume of instance %s.") % self.id)
|
||||
if self.db_info.cluster_id is not None:
|
||||
raise exception.ClusterInstanceOperationNotSupported()
|
||||
old_size = self.volume_size
|
||||
if int(new_size) <= old_size:
|
||||
raise exception.BadRequest(_("The new volume 'size' must be "
|
||||
|
@ -809,12 +847,16 @@ class Instance(BuiltInstance):
|
|||
def reboot(self):
|
||||
self.validate_can_perform_action()
|
||||
LOG.info(_("Rebooting instance %s.") % self.id)
|
||||
if self.db_info.cluster_id is not None and not self.context.is_admin:
|
||||
raise exception.ClusterInstanceOperationNotSupported()
|
||||
self.update_db(task_status=InstanceTasks.REBOOTING)
|
||||
task_api.API(self.context).reboot(self.id)
|
||||
|
||||
def restart(self):
|
||||
self.validate_can_perform_action()
|
||||
LOG.info(_("Restarting datastore on instance %s.") % self.id)
|
||||
if self.db_info.cluster_id is not None and not self.context.is_admin:
|
||||
raise exception.ClusterInstanceOperationNotSupported()
|
||||
# Set our local status since Nova might not change it quick enough.
|
||||
#TODO(tim.simpson): Possible bad stuff can happen if this service
|
||||
# shuts down before it can set status to NONE.
|
||||
|
@ -958,7 +1000,7 @@ class Instances(object):
|
|||
DEFAULT_LIMIT = CONF.instances_page_size
|
||||
|
||||
@staticmethod
|
||||
def load(context):
|
||||
def load(context, include_clustered):
|
||||
|
||||
def load_simple_instance(context, db, status, **kwargs):
|
||||
return SimpleInstance(context, db, status)
|
||||
|
@ -968,7 +1010,13 @@ class Instances(object):
|
|||
client = create_nova_client(context)
|
||||
servers = client.servers.list()
|
||||
|
||||
db_infos = DBInstance.find_all(tenant_id=context.tenant, deleted=False)
|
||||
if include_clustered:
|
||||
db_infos = DBInstance.find_all(tenant_id=context.tenant,
|
||||
deleted=False)
|
||||
else:
|
||||
db_infos = DBInstance.find_all(tenant_id=context.tenant,
|
||||
cluster_id=None,
|
||||
deleted=False)
|
||||
limit = int(context.limit or Instances.DEFAULT_LIMIT)
|
||||
if limit > Instances.DEFAULT_LIMIT:
|
||||
limit = Instances.DEFAULT_LIMIT
|
||||
|
@ -987,6 +1035,14 @@ class Instances(object):
|
|||
find_server)
|
||||
return ret, next_marker
|
||||
|
||||
@staticmethod
|
||||
def load_all_by_cluster_id(context, cluster_id, load_servers=True):
|
||||
db_instances = DBInstance.find_all(cluster_id=cluster_id,
|
||||
deleted=False)
|
||||
return [load_any_instance(context, db_inst.id,
|
||||
load_server=load_servers)
|
||||
for db_inst in db_instances]
|
||||
|
||||
@staticmethod
|
||||
def _load_servers_status(load_instance, context, db_items, find_server):
|
||||
ret = []
|
||||
|
@ -1031,7 +1087,8 @@ class DBInstance(dbmodels.DatabaseModelBase):
|
|||
_data_fields = ['name', 'created', 'compute_instance_id',
|
||||
'task_id', 'task_description', 'task_start_time',
|
||||
'volume_id', 'deleted', 'tenant_id',
|
||||
'datastore_version_id', 'configuration_id', 'slave_of_id']
|
||||
'datastore_version_id', 'configuration_id', 'slave_of_id',
|
||||
'cluster_id', 'shard_id', 'type']
|
||||
|
||||
def __init__(self, task_status, **kwargs):
|
||||
"""
|
||||
|
|
|
@ -133,7 +133,9 @@ class InstanceController(wsgi.Controller):
|
|||
LOG.info(_("Listing database instances for tenant '%s'") % tenant_id)
|
||||
LOG.debug("req : '%s'\n\n" % req)
|
||||
context = req.environ[wsgi.CONTEXT_KEY]
|
||||
servers, marker = models.Instances.load(context)
|
||||
clustered_q = req.GET.get('include_clustered', '').lower()
|
||||
include_clustered = clustered_q == 'true'
|
||||
servers, marker = models.Instances.load(context, include_clustered)
|
||||
view = views.InstancesView(servers, req=req)
|
||||
paged = pagination.SimplePaginatedDataView(req.url, 'instances', view,
|
||||
marker)
|
||||
|
|
|
@ -110,6 +110,12 @@ class InstanceDetailView(InstanceView):
|
|||
if self.instance.root_password:
|
||||
result['instance']['password'] = self.instance.root_password
|
||||
|
||||
if self.instance.cluster_id:
|
||||
result['instance']['cluster_id'] = self.instance.cluster_id
|
||||
|
||||
if self.instance.shard_id:
|
||||
result['instance']['shard_id'] = self.instance.shard_id
|
||||
|
||||
return result
|
||||
|
||||
def _build_slaves_info(self):
|
||||
|
|
|
@ -120,16 +120,15 @@ class DbQuotaDriver(object):
|
|||
|
||||
return quotas
|
||||
|
||||
def reserve(self, tenant_id, resources, deltas):
|
||||
"""Check quotas and reserve resources for a tenant.
|
||||
def check_quotas(self, tenant_id, resources, deltas):
|
||||
"""Check quotas for a tenant.
|
||||
|
||||
This method checks quotas against current usage,
|
||||
reserved resources and the desired deltas.
|
||||
|
||||
If any of the proposed values is over the defined quota, an
|
||||
QuotaExceeded exception will be raised with the sorted list of the
|
||||
resources which are too high. Otherwise, the method returns a
|
||||
list of reservation objects which were created.
|
||||
resources which are too high.
|
||||
|
||||
:param tenant_id: The ID of the tenant reserving the resources.
|
||||
:param resources: A dictionary of the registered resources.
|
||||
|
@ -155,8 +154,28 @@ class DbQuotaDriver(object):
|
|||
if overs:
|
||||
raise exception.QuotaExceeded(overs=sorted(overs))
|
||||
|
||||
def reserve(self, tenant_id, resources, deltas):
|
||||
"""Check quotas and reserve resources for a tenant.
|
||||
|
||||
This method checks quotas against current usage,
|
||||
reserved resources and the desired deltas.
|
||||
|
||||
If any of the proposed values is over the defined quota, an
|
||||
QuotaExceeded exception will be raised with the sorted list of the
|
||||
resources which are too high. Otherwise, the method returns a
|
||||
list of reservation objects which were created.
|
||||
|
||||
:param tenant_id: The ID of the tenant reserving the resources.
|
||||
:param resources: A dictionary of the registered resources.
|
||||
:param deltas: A dictionary of the proposed delta changes.
|
||||
"""
|
||||
|
||||
self.check_quotas(tenant_id, resources, deltas)
|
||||
quota_usages = self.get_all_quota_usages_by_tenant(tenant_id,
|
||||
deltas.keys())
|
||||
|
||||
reservations = []
|
||||
for resource in deltas:
|
||||
for resource in sorted(deltas):
|
||||
reserved = deltas[resource]
|
||||
usage = quota_usages[resource]
|
||||
usage.reserved += reserved
|
||||
|
@ -249,6 +268,9 @@ class QuotaEngine(object):
|
|||
return self._driver.get_all_quotas_by_tenant(tenant_id,
|
||||
self._resources)
|
||||
|
||||
def check_quotas(self, tenant_id, **deltas):
|
||||
self._driver.check_quotas(tenant_id, self._resources, deltas)
|
||||
|
||||
def reserve(self, tenant_id, **deltas):
|
||||
"""Check quotas and reserve resources.
|
||||
|
||||
|
@ -331,3 +353,7 @@ def run_with_quotas(tenant_id, deltas, f):
|
|||
else:
|
||||
QUOTAS.commit(reservations)
|
||||
return result
|
||||
|
||||
|
||||
def check_quotas(tenant_id, deltas):
|
||||
QUOTAS.check_quotas(tenant_id, **deltas)
|
||||
|
|
|
@ -114,7 +114,9 @@ class API(proxy.RpcProxy):
|
|||
image_id, databases, users, datastore_manager,
|
||||
packages, volume_size, backup_id=None,
|
||||
availability_zone=None, root_password=None,
|
||||
nics=None, overrides=None, slave_of_id=None):
|
||||
nics=None, overrides=None, slave_of_id=None,
|
||||
cluster_config=None):
|
||||
|
||||
LOG.debug("Making async call to create instance %s " % instance_id)
|
||||
self.cast(self.context,
|
||||
self.make_msg("create_instance",
|
||||
|
@ -132,7 +134,8 @@ class API(proxy.RpcProxy):
|
|||
root_password=root_password,
|
||||
nics=nics,
|
||||
overrides=overrides,
|
||||
slave_of_id=slave_of_id))
|
||||
slave_of_id=slave_of_id,
|
||||
cluster_config=cluster_config))
|
||||
|
||||
def update_overrides(self, instance_id, overrides=None):
|
||||
LOG.debug("Making async call to update datastore configurations for "
|
||||
|
@ -152,3 +155,13 @@ class API(proxy.RpcProxy):
|
|||
instance_id=instance_id,
|
||||
flavor=self._transform_obj(flavor),
|
||||
configuration_id=configuration_id))
|
||||
|
||||
def create_cluster(self, cluster_id):
|
||||
pass
|
||||
|
||||
def delete_cluster(self, cluster_id):
|
||||
pass
|
||||
|
||||
|
||||
def load(context, manager=None):
|
||||
pass
|
||||
|
|
|
@ -83,6 +83,7 @@ class Manager(periodic_task.PeriodicTasks):
|
|||
datastore_manager, packages, volume_size,
|
||||
availability_zone,
|
||||
root_password, nics, overrides, slave_of_id):
|
||||
|
||||
instance_tasks = FreshInstanceTasks.load(context, instance_id)
|
||||
|
||||
snapshot = instance_tasks.get_replication_master_snapshot(context,
|
||||
|
@ -99,7 +100,8 @@ class Manager(periodic_task.PeriodicTasks):
|
|||
def create_instance(self, context, instance_id, name, flavor,
|
||||
image_id, databases, users, datastore_manager,
|
||||
packages, volume_size, backup_id, availability_zone,
|
||||
root_password, nics, overrides, slave_of_id):
|
||||
root_password, nics, overrides, slave_of_id,
|
||||
cluster_config):
|
||||
if slave_of_id:
|
||||
self._create_replication_slave(context, instance_id, name,
|
||||
flavor, image_id, databases, users,
|
||||
|
|
|
@ -60,6 +60,7 @@ class BackupCreateTest(testtools.TestCase):
|
|||
return_value=None)
|
||||
instance.datastore_version = MagicMock()
|
||||
instance.datastore_version.id = 'datastore-id-999'
|
||||
instance.cluster_id = None
|
||||
with patch.object(models.Backup, 'validate_can_perform_action',
|
||||
return_value=None):
|
||||
with patch.object(models.Backup, 'verify_swift_auth_token',
|
||||
|
@ -96,6 +97,7 @@ class BackupCreateTest(testtools.TestCase):
|
|||
return_value=None)
|
||||
instance.datastore_version = MagicMock()
|
||||
instance.datastore_version.id = 'datastore-id-999'
|
||||
instance.cluster_id = None
|
||||
with patch.object(models.Backup, 'validate_can_perform_action',
|
||||
return_value=None):
|
||||
with patch.object(models.Backup, 'verify_swift_auth_token',
|
||||
|
@ -140,6 +142,7 @@ class BackupCreateTest(testtools.TestCase):
|
|||
return_value=instance):
|
||||
instance.validate_can_perform_action = MagicMock(
|
||||
return_value=None)
|
||||
instance.cluster_id = None
|
||||
with patch.object(models.Backup, 'validate_can_perform_action',
|
||||
return_value=None):
|
||||
with patch.object(models.Backup, 'verify_swift_auth_token',
|
||||
|
|
|
@ -0,0 +1,208 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import uuid
|
||||
|
||||
from mock import Mock
|
||||
from mock import patch
|
||||
from testtools import TestCase
|
||||
from trove.cluster.models import Cluster
|
||||
from trove.cluster.models import ClusterTasks
|
||||
from trove.cluster.models import DBCluster
|
||||
from trove.common import cfg
|
||||
from trove.common import exception
|
||||
from trove.common import remote
|
||||
from trove.common.strategies.mongodb import api as mongodb_api
|
||||
from trove.datastore import models as datastore_models
|
||||
from trove.instance import models as inst_models
|
||||
from trove.instance.models import DBInstance
|
||||
from trove.instance.tasks import InstanceTasks
|
||||
from trove.quota.quota import QUOTAS
|
||||
from trove.taskmanager import api as task_api
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class ClusterTest(TestCase):
|
||||
def setUp(self):
|
||||
super(ClusterTest, self).setUp()
|
||||
self.cluster_id = str(uuid.uuid4())
|
||||
self.cluster_name = "Cluster" + self.cluster_id
|
||||
self.tenant_id = "23423432"
|
||||
self.dv_id = "1"
|
||||
self.db_info = DBCluster(ClusterTasks.NONE,
|
||||
id=self.cluster_id,
|
||||
name=self.cluster_name,
|
||||
tenant_id=self.tenant_id,
|
||||
datastore_version_id=self.dv_id,
|
||||
task_id=ClusterTasks.NONE._code)
|
||||
self.context = Mock()
|
||||
self.datastore = Mock()
|
||||
self.dv = Mock()
|
||||
self.dv.manager = "mongodb"
|
||||
self.datastore_version = self.dv
|
||||
self.cluster = mongodb_api.MongoDbCluster(self.context, self.db_info,
|
||||
self.datastore,
|
||||
self.datastore_version)
|
||||
self.instances = [{'volume_size': 1, 'flavor_id': '1234'},
|
||||
{'volume_size': 1, 'flavor_id': '1234'},
|
||||
{'volume_size': 1, 'flavor_id': '1234'}]
|
||||
self.volume_support = CONF.get(self.dv.manager).volume_support
|
||||
self.remote_nova = remote.create_nova_client
|
||||
|
||||
def tearDown(self):
|
||||
super(ClusterTest, self).tearDown()
|
||||
CONF.get(self.dv.manager).volume_support = self.volume_support
|
||||
remote.create_nova_client = self.remote_nova
|
||||
|
||||
def test_create_empty_instances(self):
|
||||
self.assertRaises(exception.ClusterNumInstancesNotSupported,
|
||||
Cluster.create,
|
||||
Mock(),
|
||||
self.cluster_name,
|
||||
self.datastore,
|
||||
self.datastore_version,
|
||||
[]
|
||||
)
|
||||
|
||||
def test_create_unequal_flavors(self):
|
||||
instances = self.instances
|
||||
instances[0]['flavor_id'] = '4567'
|
||||
self.assertRaises(exception.ClusterFlavorsNotEqual,
|
||||
Cluster.create,
|
||||
Mock(),
|
||||
self.cluster_name,
|
||||
self.datastore,
|
||||
self.datastore_version,
|
||||
instances
|
||||
)
|
||||
|
||||
@patch.object(remote, 'create_nova_client')
|
||||
def test_create_unequal_volumes(self,
|
||||
mock_client):
|
||||
instances = self.instances
|
||||
instances[0]['volume_size'] = 2
|
||||
flavors = Mock()
|
||||
mock_client.return_value.flavors = flavors
|
||||
self.assertRaises(exception.ClusterVolumeSizesNotEqual,
|
||||
Cluster.create,
|
||||
Mock(),
|
||||
self.cluster_name,
|
||||
self.datastore,
|
||||
self.datastore_version,
|
||||
instances
|
||||
)
|
||||
|
||||
@patch.object(remote, 'create_nova_client')
|
||||
def test_create_storage_not_specified(self,
|
||||
mock_client):
|
||||
class FakeFlavor:
|
||||
def __init__(self, flavor_id):
|
||||
self.flavor_id = flavor_id
|
||||
|
||||
@property
|
||||
def id(self):
|
||||
return self.flavor.id
|
||||
|
||||
@property
|
||||
def ephemeral(self):
|
||||
return 0
|
||||
instances = [{'flavor_id': '1234'},
|
||||
{'flavor_id': '1234'},
|
||||
{'flavor_id': '1234'}]
|
||||
CONF.get(self.dv.manager).volume_support = False
|
||||
(mock_client.return_value.
|
||||
flavors.get.return_value) = FakeFlavor('1234')
|
||||
self.assertRaises(exception.LocalStorageNotSpecified,
|
||||
Cluster.create,
|
||||
Mock(),
|
||||
self.cluster_name,
|
||||
self.datastore,
|
||||
self.datastore_version,
|
||||
instances
|
||||
)
|
||||
|
||||
def test_delete_bad_task_status(self):
|
||||
self.cluster.db_info.task_status = ClusterTasks.BUILDING_INITIAL
|
||||
self.assertRaises(exception.UnprocessableEntity,
|
||||
self.cluster.delete)
|
||||
|
||||
@patch.object(task_api.API, 'delete_cluster')
|
||||
@patch.object(Cluster, 'update_db')
|
||||
@patch.object(inst_models.DBInstance, 'find_all')
|
||||
def test_delete_task_status_none(self,
|
||||
mock_find_all,
|
||||
mock_update_db,
|
||||
mock_delete_cluster):
|
||||
self.cluster.db_info.task_status = ClusterTasks.NONE
|
||||
self.cluster.delete()
|
||||
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
|
||||
|
||||
@patch.object(task_api.API, 'delete_cluster')
|
||||
@patch.object(Cluster, 'update_db')
|
||||
@patch.object(inst_models.DBInstance, 'find_all')
|
||||
def test_delete_task_status_deleting(self,
|
||||
mock_find_all,
|
||||
mock_update_db,
|
||||
mock_delete_cluster):
|
||||
self.cluster.db_info.task_status = ClusterTasks.DELETING
|
||||
self.cluster.delete()
|
||||
mock_update_db.assert_called_with(task_status=ClusterTasks.DELETING)
|
||||
|
||||
def test_add_shard_bad_task_status(self):
|
||||
task_status = ClusterTasks.BUILDING_INITIAL
|
||||
self.cluster.db_info.task_status = task_status
|
||||
self.assertRaises(exception.UnprocessableEntity,
|
||||
self.cluster.add_shard)
|
||||
|
||||
@patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
|
||||
@patch.object(task_api, 'load')
|
||||
@patch.object(Cluster, 'update_db')
|
||||
@patch.object(inst_models.Instance, 'create')
|
||||
@patch.object(QUOTAS, 'check_quotas')
|
||||
@patch.object(inst_models, 'load_any_instance')
|
||||
@patch.object(inst_models.DBInstance, 'find_all')
|
||||
def test_add_shard(self,
|
||||
mock_find_all,
|
||||
mock_load_any_instance,
|
||||
mock_check_quotas,
|
||||
mock_instance_create,
|
||||
mock_update_db,
|
||||
mock_task_api_load,
|
||||
mock_load_by_uuid):
|
||||
self.cluster.db_info.task_status = ClusterTasks.NONE
|
||||
(mock_find_all.return_value
|
||||
.all.return_value) = [DBInstance(InstanceTasks.NONE,
|
||||
name="TestInstance1",
|
||||
shard_id="1", id='1',
|
||||
datastore_version_id='1'),
|
||||
DBInstance(InstanceTasks.NONE,
|
||||
name="TestInstance2",
|
||||
shard_id="1", id='2',
|
||||
datastore_version_id='1'),
|
||||
DBInstance(InstanceTasks.NONE,
|
||||
name="TestInstance3",
|
||||
shard_id="1", id='3',
|
||||
datastore_version_id='1')]
|
||||
mock_datastore_version = Mock()
|
||||
mock_datastore_version.manager = 'mongodb'
|
||||
mock_load_by_uuid.return_value = mock_datastore_version
|
||||
mock_task_api = Mock()
|
||||
mock_task_api.mongodb_add_shard_cluster.return_value = None
|
||||
mock_task_api_load.return_value = mock_task_api
|
||||
self.cluster.add_shard()
|
||||
mock_update_db.assert_called_with(task_status=
|
||||
ClusterTasks.ADDING_SHARD)
|
||||
mock_task_api.mongodb_add_shard_cluster.assert_called
|
|
@ -0,0 +1,385 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
import jsonschema
|
||||
|
||||
from mock import MagicMock
|
||||
from mock import Mock
|
||||
from mock import patch
|
||||
from testtools import TestCase
|
||||
from testtools.matchers import Is, Equals
|
||||
from trove.cluster import models
|
||||
from trove.cluster.models import Cluster
|
||||
from trove.cluster.service import ClusterController
|
||||
from trove.cluster import views
|
||||
import trove.common.cfg as cfg
|
||||
from trove.common import exception
|
||||
from trove.common import strategy
|
||||
from trove.common import utils
|
||||
from trove.datastore import models as datastore_models
|
||||
|
||||
|
||||
class TestClusterController(TestCase):
|
||||
def setUp(self):
|
||||
super(TestClusterController, self).setUp()
|
||||
self.controller = ClusterController()
|
||||
self.cluster = {
|
||||
"cluster": {
|
||||
"name": "products",
|
||||
"datastore": {
|
||||
"type": "mongodb",
|
||||
"version": "2.4.10"
|
||||
},
|
||||
"instances": [
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
self.add_shard = {
|
||||
"add_shard": {}
|
||||
}
|
||||
|
||||
def test_get_schema_create(self):
|
||||
schema = self.controller.get_schema('create', self.cluster)
|
||||
self.assertIsNotNone(schema)
|
||||
self.assertTrue('cluster' in schema['properties'])
|
||||
self.assertTrue('cluster')
|
||||
|
||||
def test_get_schema_action_add_shard(self):
|
||||
schema = self.controller.get_schema('add_shard', self.add_shard)
|
||||
self.assertIsNotNone(schema)
|
||||
self.assertTrue('add_shard' in schema['properties'])
|
||||
|
||||
def test_validate_create(self):
|
||||
body = self.cluster
|
||||
schema = self.controller.get_schema('create', body)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertTrue(validator.is_valid(body))
|
||||
|
||||
def test_validate_add_shard(self):
|
||||
body = self.add_shard
|
||||
schema = self.controller.get_schema('add_shard', body)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertTrue(validator.is_valid(body))
|
||||
|
||||
def test_validate_create_blankname(self):
|
||||
body = self.cluster
|
||||
body['cluster']['name'] = " "
|
||||
schema = self.controller.get_schema('create', body)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertFalse(validator.is_valid(body))
|
||||
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
|
||||
self.assertThat(len(errors), Is(1))
|
||||
self.assertThat(errors[0].message,
|
||||
Equals("' ' does not match '^.*[0-9a-zA-Z]+.*$'"))
|
||||
|
||||
def test_validate_create_blank_datastore(self):
|
||||
body = self.cluster
|
||||
body['cluster']['datastore']['type'] = ""
|
||||
schema = self.controller.get_schema('create', body)
|
||||
validator = jsonschema.Draft4Validator(schema)
|
||||
self.assertFalse(validator.is_valid(body))
|
||||
errors = sorted(validator.iter_errors(body), key=lambda e: e.path)
|
||||
error_messages = [error.message for error in errors]
|
||||
error_paths = [error.path.pop() for error in errors]
|
||||
self.assertThat(len(errors), Is(2))
|
||||
self.assertIn("'' is too short", error_messages)
|
||||
self.assertIn("'' does not match '^.*[0-9a-zA-Z]+.*$'", error_messages)
|
||||
self.assertIn("type", error_paths)
|
||||
|
||||
@patch.object(Cluster, 'create')
|
||||
@patch.object(datastore_models, 'get_datastore_version')
|
||||
def test_create_clusters_disabled(self,
|
||||
mock_get_datastore_version,
|
||||
mock_cluster_create):
|
||||
body = self.cluster
|
||||
tenant_id = Mock()
|
||||
context = Mock()
|
||||
|
||||
req = Mock()
|
||||
req.environ = MagicMock()
|
||||
req.environ.get = Mock(return_value=context)
|
||||
|
||||
datastore_version = Mock()
|
||||
datastore_version.manager = 'mysql'
|
||||
mock_get_datastore_version.return_value = (Mock(), datastore_version)
|
||||
|
||||
self.assertRaises(exception.ClusterDatastoreNotSupported,
|
||||
self.controller.create,
|
||||
req,
|
||||
body,
|
||||
tenant_id)
|
||||
|
||||
@patch.object(Cluster, 'create')
|
||||
@patch.object(utils, 'get_id_from_href')
|
||||
@patch.object(datastore_models, 'get_datastore_version')
|
||||
def test_create_clusters(self,
|
||||
mock_get_datastore_version,
|
||||
mock_id_from_href,
|
||||
mock_cluster_create):
|
||||
body = self.cluster
|
||||
tenant_id = Mock()
|
||||
context = Mock()
|
||||
|
||||
req = Mock()
|
||||
req.environ = Mock()
|
||||
req.environ.__getitem__ = Mock(return_value=context)
|
||||
datastore_version = Mock()
|
||||
datastore_version.manager = 'mongodb'
|
||||
datastore = Mock()
|
||||
mock_get_datastore_version.return_value = (datastore,
|
||||
datastore_version)
|
||||
instances = [{'volume_size': 1, 'flavor_id': '1234'},
|
||||
{'volume_size': 1, 'flavor_id': '1234'},
|
||||
{'volume_size': 1, 'flavor_id': '1234'},
|
||||
{'volume_size': 1, 'flavor_id': '1234'},
|
||||
{'volume_size': 1, 'flavor_id': '1234'}]
|
||||
mock_id_from_href.return_value = '1234'
|
||||
|
||||
mock_cluster = Mock()
|
||||
mock_cluster.instances = []
|
||||
mock_cluster.datastore_version.manager = 'mongodb'
|
||||
mock_cluster_create.return_value = mock_cluster
|
||||
|
||||
self.controller.create(req, body, tenant_id)
|
||||
mock_cluster_create.assert_called_with(context, 'products',
|
||||
datastore, datastore_version,
|
||||
instances)
|
||||
|
||||
@patch.object(Cluster, 'load')
|
||||
def test_show_cluster(self,
|
||||
mock_cluster_load):
|
||||
tenant_id = Mock()
|
||||
id = Mock()
|
||||
context = Mock()
|
||||
req = Mock()
|
||||
req.environ = Mock()
|
||||
req.environ.__getitem__ = Mock(return_value=context)
|
||||
|
||||
mock_cluster = Mock()
|
||||
mock_cluster.instances = []
|
||||
mock_cluster.datastore_version.manager = 'mongodb'
|
||||
mock_cluster_load.return_value = mock_cluster
|
||||
|
||||
self.controller.show(req, tenant_id, id)
|
||||
mock_cluster_load.assert_called_with(context, id)
|
||||
|
||||
@patch.object(Cluster, 'load')
|
||||
@patch.object(Cluster, 'load_instance')
|
||||
def test_show_cluster_instance(self,
|
||||
mock_cluster_load_instance,
|
||||
mock_cluster_load):
|
||||
tenant_id = Mock()
|
||||
cluster_id = Mock()
|
||||
instance_id = Mock()
|
||||
context = Mock()
|
||||
req = Mock()
|
||||
req.environ = Mock()
|
||||
req.environ.__getitem__ = Mock(return_value=context)
|
||||
cluster = Mock()
|
||||
mock_cluster_load.return_value = cluster
|
||||
cluster.id = cluster_id
|
||||
self.controller.show_instance(req, tenant_id, cluster_id, instance_id)
|
||||
mock_cluster_load_instance.assert_called_with(context, cluster.id,
|
||||
instance_id)
|
||||
|
||||
@patch.object(Cluster, 'load')
|
||||
def test_delete_cluster(self, mock_cluster_load):
|
||||
tenant_id = Mock()
|
||||
cluster_id = Mock()
|
||||
req = MagicMock()
|
||||
cluster = Mock()
|
||||
mock_cluster_load.return_value = cluster
|
||||
self.controller.delete(req, tenant_id, cluster_id)
|
||||
cluster.delete.assert_called
|
||||
|
||||
|
||||
class TestClusterControllerWithStrategy(TestCase):
|
||||
def setUp(self):
|
||||
super(TestClusterControllerWithStrategy, self).setUp()
|
||||
self.controller = ClusterController()
|
||||
self.cluster = {
|
||||
"cluster": {
|
||||
"name": "products",
|
||||
"datastore": {
|
||||
"type": "mongodb",
|
||||
"version": "2.4.10"
|
||||
},
|
||||
"instances": [
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
},
|
||||
{
|
||||
"flavorRef": "7",
|
||||
"volume": {
|
||||
"size": 1
|
||||
},
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
def tearDown(self):
|
||||
super(TestClusterControllerWithStrategy, self).tearDown()
|
||||
cfg.CONF.clear_override('cluster_support', group='mongodb')
|
||||
cfg.CONF.clear_override('api_strategy', group='mongodb')
|
||||
|
||||
@patch.object(datastore_models, 'get_datastore_version')
|
||||
@patch.object(models.Cluster, 'create')
|
||||
def test_create_clusters_disabled(self,
|
||||
mock_cluster_create,
|
||||
mock_get_datastore_version):
|
||||
|
||||
cfg.CONF.set_override('cluster_support', False, group='mongodb')
|
||||
|
||||
body = self.cluster
|
||||
tenant_id = Mock()
|
||||
context = Mock()
|
||||
|
||||
req = Mock()
|
||||
req.environ = MagicMock()
|
||||
req.environ.get = Mock(return_value=context)
|
||||
|
||||
datastore_version = Mock()
|
||||
datastore_version.manager = 'mongodb'
|
||||
mock_get_datastore_version.return_value = (Mock(), datastore_version)
|
||||
|
||||
self.assertRaises(exception.TroveError, self.controller.create, req,
|
||||
body, tenant_id)
|
||||
|
||||
@patch.object(views.ClusterView, 'data', return_value={})
|
||||
@patch.object(datastore_models, 'get_datastore_version')
|
||||
@patch.object(models.Cluster, 'create')
|
||||
def test_create_clusters_enabled(self,
|
||||
mock_cluster_create,
|
||||
mock_get_datastore_version,
|
||||
mock_cluster_view_data):
|
||||
|
||||
cfg.CONF.set_override('cluster_support', True, group='mongodb')
|
||||
|
||||
body = self.cluster
|
||||
tenant_id = Mock()
|
||||
context = Mock()
|
||||
|
||||
req = Mock()
|
||||
req.environ = MagicMock()
|
||||
req.environ.get = Mock(return_value=context)
|
||||
|
||||
datastore_version = Mock()
|
||||
datastore_version.manager = 'mongodb'
|
||||
mock_get_datastore_version.return_value = (Mock(), datastore_version)
|
||||
|
||||
mock_cluster = Mock()
|
||||
mock_cluster.datastore_version.manager = 'mongodb'
|
||||
mock_cluster_create.return_value = mock_cluster
|
||||
self.controller.create(req, body, tenant_id)
|
||||
|
||||
@patch.object(models.Cluster, 'load')
|
||||
def test_controller_action_no_strategy(self,
|
||||
mock_cluster_load):
|
||||
|
||||
body = {'do_stuff2': {}}
|
||||
tenant_id = Mock()
|
||||
context = Mock()
|
||||
id = Mock()
|
||||
|
||||
req = Mock()
|
||||
req.environ = MagicMock()
|
||||
req.environ.get = Mock(return_value=context)
|
||||
|
||||
cluster = Mock()
|
||||
cluster.datastore_version.manager = 'mongodb'
|
||||
mock_cluster_load.return_value = cluster
|
||||
|
||||
self.assertRaises(exception.TroveError, self.controller.action, req,
|
||||
body, tenant_id, id)
|
||||
|
||||
@patch.object(strategy, 'load_api_strategy')
|
||||
@patch.object(models.Cluster, 'load')
|
||||
def test_controller_action_found(self,
|
||||
mock_cluster_load,
|
||||
mock_cluster_api_strategy):
|
||||
|
||||
body = {'do_stuff': {}}
|
||||
tenant_id = Mock()
|
||||
context = Mock()
|
||||
id = Mock()
|
||||
|
||||
req = Mock()
|
||||
req.environ = MagicMock()
|
||||
req.environ.get = Mock(return_value=context)
|
||||
|
||||
cluster = Mock()
|
||||
cluster.datastore_version.manager = 'mongodb'
|
||||
mock_cluster_load.return_value = cluster
|
||||
|
||||
strat = Mock()
|
||||
do_stuff_func = Mock()
|
||||
strat.cluster_controller_actions = \
|
||||
{'do_stuff': do_stuff_func}
|
||||
mock_cluster_api_strategy.return_value = strat
|
||||
|
||||
self.controller.action(req, body, tenant_id, id)
|
||||
self.assertEqual(1, do_stuff_func.call_count)
|
|
@ -0,0 +1,37 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from mock import Mock, patch
|
||||
from testtools import TestCase
|
||||
|
||||
from trove.cluster import models
|
||||
from trove.common.strategies.mongodb.api import MongoDbCluster
|
||||
from trove.datastore import models as datastore_models
|
||||
|
||||
|
||||
class TestClusterModel(TestCase):
    """Tests for trove.cluster.models.Cluster factory behavior."""

    @patch.object(datastore_models.Datastore, 'load')
    @patch.object(datastore_models.DatastoreVersion, 'load_by_uuid')
    @patch.object(models.DBCluster, 'find_by')
    def test_load(self, mock_find_by, mock_load_dsv_by_uuid, mock_ds_load):
        """Cluster.load returns the datastore-specific subclass.

        A DB record whose datastore version manager is 'mongodb' must be
        materialized as a MongoDbCluster.
        """
        context = Mock()
        # Renamed from 'id' to avoid shadowing the builtin.
        cluster_id = Mock()

        dsv = Mock()
        dsv.manager = 'mongodb'
        mock_load_dsv_by_uuid.return_value = dsv

        cluster = models.Cluster.load(context, cluster_id)
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(cluster, MongoDbCluster)
|
|
@ -0,0 +1,123 @@
|
|||
# Copyright 2014 eBay Software Foundation
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
from mock import Mock
|
||||
from mock import patch
|
||||
from testtools import TestCase
|
||||
from trove.cluster.views import ClusterInstanceDetailView
|
||||
from trove.cluster.views import ClusterView
|
||||
from trove.cluster.views import load_view
|
||||
from trove.common import cfg
|
||||
from trove.common.strategies.mongodb.api import MongoDbClusterView
|
||||
|
||||
CONF = cfg.CONF
|
||||
|
||||
|
||||
class ClusterViewTest(TestCase):
    """Tests for the generic ClusterView serializer."""

    def setUp(self):
        super(ClusterViewTest, self).setUp()
        # A minimal fake cluster with one instance, enough for data().
        self.cluster = Mock()
        self.cluster.created = 'Yesterday'
        self.cluster.updated = 'Now'
        self.cluster.name = 'cluster1'
        self.cluster.datastore_version = Mock()
        self.cluster.datastore_version.name = 'mysql_test_version'
        self.cluster.instances = []
        self.cluster.instances.append(Mock())
        self.cluster.instances[0].flavor_id = '123'
        self.cluster.instances[0].volume = Mock()
        self.cluster.instances[0].volume.size = 1
        self.cluster.instances[0].slave_of_id = None
        self.cluster.instances[0].slaves = None

    def tearDown(self):
        super(ClusterViewTest, self).tearDown()

    # NOTE: build_instances' return value is set in the test body, so the
    # decorator does not need (and previously ignored) a return_value arg.
    @patch.object(ClusterView, 'build_instances')
    @patch.object(ClusterView, '_build_flavor_info')
    @patch.object(ClusterView, '_build_links')
    def test_data(self, mock_build_links,
                  mock_build_flavor_info, mock_build_instances):
        """data() echoes the cluster's basic fields into the payload."""
        mock_build_instances.return_value = Mock(), Mock()
        view = ClusterView(self.cluster, Mock())
        result = view.data()
        self.assertEqual(self.cluster.created, result['cluster']['created'])
        self.assertEqual(self.cluster.updated, result['cluster']['updated'])
        self.assertEqual(self.cluster.name, result['cluster']['name'])
        self.assertEqual(self.cluster.datastore_version.name,
                         result['cluster']['datastore']['version'])

    @patch.object(ClusterView, 'build_instances', return_value=('10.0.0.1',
                                                                []))
    @patch.object(ClusterView, '_build_flavor_info')
    @patch.object(ClusterView, '_build_links')
    def test_load_view(self, *args):
        """load_view picks the datastore-specific view class."""
        cluster = Mock()
        cluster.datastore_version.manager = 'mongodb'
        view = load_view(cluster, Mock())
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(view, MongoDbClusterView)
|
||||
|
||||
|
||||
class ClusterInstanceDetailViewTest(TestCase):
    """Tests for ClusterInstanceDetailView's hostname/ip selection."""

    def setUp(self):
        super(ClusterInstanceDetailViewTest, self).setUp()
        # Build a fake instance carrying every attribute data() reads.
        fake = Mock()
        fake.created = 'Yesterday'
        fake.updated = 'Now'
        fake.datastore_version = Mock()
        fake.datastore_version.name = 'mysql_test_version'
        fake.hostname = 'test.trove.com'
        self.ip = "1.2.3.4"
        fake.addresses = {"private": [{"addr": self.ip}]}
        fake.volume_used = '3'
        fake.root_password = 'iloveyou'
        fake.get_visible_ip_addresses = lambda: ["1.2.3.4"]
        fake.slave_of_id = None
        fake.slaves = None
        self.instance = fake

    def tearDown(self):
        super(ClusterInstanceDetailViewTest, self).tearDown()

    @patch.object(ClusterInstanceDetailView, '_build_links')
    @patch.object(ClusterInstanceDetailView, '_build_flavor_links')
    @patch.object(ClusterInstanceDetailView, '_build_configuration_info')
    def test_data(self, *args):
        """With a hostname set, the view exposes it and omits 'ip'."""
        payload = ClusterInstanceDetailView(self.instance, Mock()).data()
        inst = payload['instance']
        self.assertEqual(self.instance.created, inst['created'])
        self.assertEqual(self.instance.updated, inst['updated'])
        self.assertEqual(self.instance.datastore_version.name,
                         inst['datastore']['version'])
        self.assertEqual(self.instance.hostname,
                         inst['hostname'])
        self.assertNotIn('ip', inst)

    @patch.object(ClusterInstanceDetailView, '_build_links')
    @patch.object(ClusterInstanceDetailView, '_build_flavor_links')
    @patch.object(ClusterInstanceDetailView, '_build_configuration_info')
    def test_data_ip(self, *args):
        """Without a hostname, the view falls back to the visible IPs."""
        self.instance.hostname = None
        payload = ClusterInstanceDetailView(self.instance, Mock()).data()
        inst = payload['instance']
        self.assertEqual(self.instance.created, inst['created'])
        self.assertEqual(self.instance.updated, inst['updated'])
        self.assertEqual(self.instance.datastore_version.name,
                         inst['datastore']['version'])
        self.assertNotIn('hostname', inst)
        self.assertEqual([self.ip], inst['ip'])
|
|
@ -11,6 +11,7 @@
|
|||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import testtools
|
||||
from mock import Mock, MagicMock, patch
|
||||
from trove.quota.quota import DbQuotaDriver
|
||||
|
@ -31,7 +32,7 @@ Unit tests for the classes and functions in DbQuotaDriver.py.
|
|||
CONF = cfg.CONF

# Quota resources under test. Each Resource key must appear exactly once:
# the earlier text listed Resource.VOLUMES twice, and in a dict literal
# the second entry silently overwrites the first.
resources = {
    Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'),
    Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),
}

FAKE_TENANT1 = "123456"
|
||||
|
|
Loading…
Reference in New Issue