Modify API to include cluster related operations
This patch adds a new API, /clusters, that allows summary and detailed
listings, as well as show and update operations. It also updates service
listings to return the cluster_name of each service.

DocImpact: 3 new policies have been added for clusters: "get", "get_all"
and "update".
APIImpact: Return cluster_name in service listings and add the /clusters
endpoint.
Specs: https://review.openstack.org/327283
Implements: blueprint cinder-volume-active-active-support
Change-Id: If1ef3a80900ca6d117bf854ad3de142d93694adf

parent 62f761ff16
commit 8b713e5327
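As a quick illustration of the API addition, a client request against the new
endpoint might look like the sketch below; the endpoint URL, project ID and
token are placeholders rather than values from this patch, and the response
shape is documented in the API version history further down.

    import requests

    # Placeholder values for illustration only.
    CINDER = 'http://cinder.example.com:8776/v3/my-project-id'
    HEADERS = {
        'X-Auth-Token': 'my-token',
        # Both the /clusters API and the 'cluster' field in service listings
        # require microversion 3.7 or later.
        'OpenStack-API-Version': 'volume 3.7',
    }

    # Summary listing, optionally filtered by the documented URL parameters.
    resp = requests.get(CINDER + '/clusters', headers=HEADERS,
                        params={'binary': 'cinder-volume', 'is_up': 'true'})
    print(resp.json())  # e.g. {'clusters': [{'name': ..., 'state': ..., 'status': ...}]}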
@@ -85,6 +85,11 @@ class ServiceController(wsgi.Controller):
                           'zone': svc.availability_zone,
                           'status': active, 'state': art,
                           'updated_at': updated_at}
+
+            # On V3.7 we added cluster support
+            if req.api_version_request.matches('3.7'):
+                ret_fields['cluster'] = svc.cluster_name
+
             if detailed:
                 ret_fields['disabled_reason'] = svc.disabled_reason
                 if svc.binary == "cinder-volume":

@@ -153,8 +158,7 @@ class ServiceController(wsgi.Controller):
         try:
             host = body['host']
         except (TypeError, KeyError):
-            msg = _("Missing required element 'host' in request body.")
-            raise webob.exc.HTTPBadRequest(explanation=msg)
+            raise exception.MissingRequired(element='host')

         ret_val['disabled'] = disabled
         if id == "disable-log-reason" and ext_loaded:
@@ -54,6 +54,7 @@ REST_API_VERSION_HISTORY = """
     * 3.5 - Add pagination support to messages API.
     * 3.6 - Allows to set empty description and empty name for consistency
             group in consisgroup-update operation.
+    * 3.7 - Add cluster API and cluster_name field to service list API

 """

@@ -62,7 +63,7 @@ REST_API_VERSION_HISTORY = """
 # minimum version of the API supported.
 # Explicitly using /v1 or /v2 endpoints will still work
 _MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.6"
+_MAX_API_VERSION = "3.7"
 _LEGACY_API_VERSION1 = "1.0"
 _LEGACY_API_VERSION2 = "2.0"

@@ -69,3 +69,80 @@ user documentation.
 ---
 Allowed to set empty description and empty name for consistency
 group in consisgroup-update operation.
+
+3.7
+---
+Added ``cluster_name`` field to service list/detail.
+
+Added /clusters endpoint to list/show/update clusters.
+
+Show endpoint requires the cluster name and optionally the binary as a URL
+parameter (default is "cinder-volume"). Returns:
+
+.. code-block:: json
+
+   "cluster": {
+       "created_at": ...,
+       "disabled_reason": null,
+       "last_heartbeat": ...,
+       "name": "cluster_name",
+       "num_down_hosts": 4,
+       "num_hosts": 2,
+       "state": "up",
+       "status": "enabled",
+       "updated_at": ...
+   }
+
+Update endpoint allows enabling and disabling a cluster in a similar way to
+service's update endpoint, but in the body we must specify the name and
+optionally the binary ("cinder-volume" is the default) and the disabled
+reason. Returns:
+
+.. code-block:: json
+
+   "cluster": {
+       "name": "cluster_name",
+       "state": "up",
+       "status": "enabled",
+       "disabled_reason": null
+   }
+
+Index and detail accept filtering by `name`, `binary`, `disabled`,
+`num_hosts`, `num_down_hosts`, and up/down status (`is_up`) as URL
+parameters.
+
+Index endpoint returns:
+
+.. code-block:: json
+
+   "clusters": [
+       {
+           "name": "cluster_name",
+           "state": "up",
+           "status": "enabled"
+       },
+       {
+           ...
+       }
+   ]
+
+Detail endpoint returns:
+
+.. code-block:: json
+
+   "clusters": [
+       {
+           "created_at": ...,
+           "disabled_reason": null,
+           "last_heartbeat": ...,
+           "name": "cluster_name",
+           "num_down_hosts": 4,
+           "num_hosts": 2,
+           "state": "up",
+           "status": "enabled",
+           "updated_at": ...
+       },
+       {
+           ...
+       }
+   ]
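A rough sketch of how a client might drive the update endpoint described
above; the endpoint URL, project ID, token and cluster name are placeholders,
and the body fields simply mirror the ones listed in the documentation.

    import requests

    # Placeholder values for illustration only.
    CINDER = 'http://cinder.example.com:8776/v3/my-project-id'
    HEADERS = {'X-Auth-Token': 'my-token',
               'OpenStack-API-Version': 'volume 3.7'}

    # Disable a cluster with a reason; enabling would PUT to /clusters/enable
    # without a disabled_reason.
    body = {'name': 'cluster1', 'binary': 'cinder-volume',
            'disabled_reason': 'planned maintenance'}
    resp = requests.put(CINDER + '/clusters/disable', json=body, headers=HEADERS)
    print(resp.json())  # expected shape: {'cluster': {'name': ..., 'state': ...,
                        #                              'status': ..., 'disabled_reason': ...}}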
@@ -33,6 +33,7 @@ from cinder.api.openstack import versioned_method
 from cinder import exception
 from cinder import i18n
 from cinder.i18n import _, _LE, _LI
+from cinder import policy
 from cinder import utils
 from cinder.wsgi import common as wsgi

@@ -1295,6 +1296,23 @@ class Controller(object):
         except exception.InvalidInput as error:
             raise webob.exc.HTTPBadRequest(explanation=error.msg)

+    @staticmethod
+    def get_policy_checker(prefix):
+        @staticmethod
+        def policy_checker(req, action, resource=None):
+            ctxt = req.environ['cinder.context']
+            target = {
+                'project_id': ctxt.project_id,
+                'user_id': ctxt.user_id,
+            }
+            if resource:
+                target.update(resource)
+
+            _action = '%s:%s' % (prefix, action)
+            policy.enforce(ctxt, _action, target)
+            return ctxt
+        return policy_checker
+

 class Fault(webob.exc.HTTPException):
     """Wrap webob.exc.HTTPException to provide API friendly response."""
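The new clusters controller added below consumes this helper by binding a
checker to its policy prefix and invoking it once per action. A condensed
sketch of that pattern follows; the 'widgets' prefix and controller are made
up for illustration and are not part of this patch.

    from cinder.api.openstack import wsgi


    class WidgetController(wsgi.Controller):
        # Builds a callable that enforces 'widgets:<action>' against the
        # request context; 'widgets' is an illustrative policy prefix.
        policy_checker = wsgi.Controller.get_policy_checker('widgets')

        def index(self, req):
            # Raises NotAuthorized (translated by the wsgi middleware) when
            # the caller fails the policy check; otherwise returns the context.
            context = self.policy_checker(req, 'get_all')
            return {'widgets': [], 'project': context.project_id}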
cinder/api/v3/clusters.py (new file, 132 lines)
@@ -0,0 +1,132 @@
# Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.api.openstack import wsgi
from cinder.api.v3.views import clusters as clusters_view
from cinder import exception
from cinder.i18n import _
from cinder import objects
from cinder import utils


CLUSTER_MICRO_VERSION = '3.7'


class ClusterController(wsgi.Controller):
    allowed_list_keys = {'name', 'binary', 'is_up', 'disabled', 'num_hosts',
                         'num_down_hosts'}

    policy_checker = wsgi.Controller.get_policy_checker('clusters')

    @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
    def show(self, req, id, binary='cinder-volume'):
        """Return data for a given cluster name with optional binary."""
        # Let the wsgi middleware convert NotAuthorized exceptions
        context = self.policy_checker(req, 'get')
        # Let the wsgi middleware convert NotFound exceptions
        cluster = objects.Cluster.get_by_id(context, None, binary=binary,
                                            name=id, services_summary=True)
        return clusters_view.ViewBuilder.detail(cluster)

    @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
    def index(self, req):
        """Return a non detailed list of all existing clusters.

        Filter by is_up, disabled, num_hosts, and num_down_hosts.
        """
        return self._get_clusters(req, detail=False)

    @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
    def detail(self, req):
        """Return a detailed list of all existing clusters.

        Filter by is_up, disabled, num_hosts, and num_down_hosts.
        """
        return self._get_clusters(req, detail=True)

    def _get_clusters(self, req, detail):
        # Let the wsgi middleware convert NotAuthorized exceptions
        context = self.policy_checker(req, 'get_all')

        filters = dict(req.GET)
        allowed = self.allowed_list_keys

        # Check filters are valid
        if not allowed.issuperset(filters):
            invalid_keys = set(filters).difference(allowed)
            msg = _('Invalid filter keys: %s') % ', '.join(invalid_keys)
            raise exception.InvalidInput(reason=msg)

        # Check boolean values
        for bool_key in ('disabled', 'is_up'):
            if bool_key in filters:
                filters[bool_key] = utils.get_bool_param(bool_key, req.GET)

        # For detailed view we need the services summary information
        filters['services_summary'] = detail

        clusters = objects.ClusterList.get_all(context, **filters)
        return clusters_view.ViewBuilder.list(clusters, detail)

    @wsgi.Controller.api_version(CLUSTER_MICRO_VERSION)
    def update(self, req, id, body):
        """Enable/Disable scheduling for a cluster."""
        # NOTE(geguileo): This method tries to be consistent with services
        # update endpoint API.

        # Let the wsgi middleware convert NotAuthorized exceptions
        context = self.policy_checker(req, 'update')

        if id not in ('enable', 'disable'):
            raise exception.NotFound(message=_("Unknown action"))

        disabled = id != 'enable'
        disabled_reason = self._get_disabled_reason(body) if disabled else None

        if not disabled and disabled_reason:
            msg = _("Unexpected 'disabled_reason' found on enable request.")
            raise exception.InvalidInput(reason=msg)

        name = body.get('name')
        if not name:
            raise exception.MissingRequired(element='name')

        binary = body.get('binary', 'cinder-volume')

        # Let wsgi handle NotFound exception
        cluster = objects.Cluster.get_by_id(context, None, binary=binary,
                                            name=name)
        cluster.disabled = disabled
        cluster.disabled_reason = disabled_reason
        cluster.save()

        # We return summary data plus the disabled reason
        ret_val = clusters_view.ViewBuilder.summary(cluster)
        ret_val['cluster']['disabled_reason'] = disabled_reason

        return ret_val

    def _get_disabled_reason(self, body):
        reason = body.get('disabled_reason')
        if reason:
            # Let wsgi handle InvalidInput exception
            reason = reason.strip()
            utils.check_string_length(reason, 'Disabled reason', min_length=1,
                                      max_length=255)
        return reason


def create_resource():
    return wsgi.Resource(ClusterController())
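As a standalone illustration of the filter handling in _get_clusters above,
the snippet below reproduces the same superset check and boolean coercion in
plain Python; it is not Cinder code, and the truthy-string handling is a
simplified stand-in for utils.get_bool_param.

    ALLOWED = {'name', 'binary', 'is_up', 'disabled', 'num_hosts',
               'num_down_hosts'}


    def validate_filters(query_params):
        """Reject unknown keys and coerce boolean query-string values."""
        filters = dict(query_params)
        if not ALLOWED.issuperset(filters):
            invalid = ', '.join(sorted(set(filters) - ALLOWED))
            raise ValueError('Invalid filter keys: %s' % invalid)
        for key in ('disabled', 'is_up'):
            if key in filters:
                filters[key] = str(filters[key]).lower() in ('1', 'true',
                                                             'yes', 'on')
        return filters


    print(validate_filters({'is_up': 'true', 'binary': 'cinder-volume'}))
    # {'is_up': True, 'binary': 'cinder-volume'}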
@@ -26,6 +26,7 @@ from cinder.api.v2 import snapshot_metadata
 from cinder.api.v2 import snapshots
 from cinder.api.v2 import types
 from cinder.api.v2 import volume_metadata
+from cinder.api.v3 import clusters
 from cinder.api.v3 import consistencygroups
 from cinder.api.v3 import messages
 from cinder.api.v3 import volumes

@@ -55,6 +56,11 @@ class APIRouter(cinder.api.openstack.APIRouter):
                         controller=self.resources['messages'],
                         collection={'detail': 'GET'})

+        self.resources['clusters'] = clusters.create_resource()
+        mapper.resource('cluster', 'clusters',
+                        controller=self.resources['clusters'],
+                        collection={'detail': 'GET'})
+
         self.resources['types'] = types.create_resource()
         mapper.resource("type", "types",
                         controller=self.resources['types'],
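For context, mapper.resource() with collection={'detail': 'GET'} is expected
to expose the usual index/show/update routes plus a collection-level detail
route. The standalone sketch below uses the Routes library directly to show
the resulting URL surface; the controller is given as a plain string and the
exact match output may differ slightly from what Cinder's router produces.

    import routes

    mapper = routes.Mapper()
    # Mirrors the registration above, outside of Cinder.
    mapper.resource('cluster', 'clusters', controller='clusters',
                    collection={'detail': 'GET'})

    for method, path in (('GET', '/clusters'),           # index (summary list)
                         ('GET', '/clusters/detail'),    # detailed list
                         ('GET', '/clusters/cluster1'),  # show
                         ('PUT', '/clusters/disable')):  # update (enable/disable)
        print(method, path,
              mapper.match(path, environ={'REQUEST_METHOD': method}))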
cinder/api/v3/views/clusters.py (new file, 63 lines)
@@ -0,0 +1,63 @@
# Copyright (c) 2016 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_utils import timeutils


class ViewBuilder(object):
    """Map Cluster into dicts for API responses."""

    _collection_name = 'clusters'

    @staticmethod
    def _normalize(date):
        if date:
            return timeutils.normalize_time(date)
        return ''

    @classmethod
    def detail(cls, cluster, flat=False):
        """Detailed view of a cluster."""
        result = cls.summary(cluster, flat=True)
        result.update(
            num_hosts=cluster.num_hosts,
            num_down_hosts=cluster.num_down_hosts,
            last_heartbeat=cls._normalize(cluster.last_heartbeat),
            created_at=cls._normalize(cluster.created_at),
            updated_at=cls._normalize(cluster.updated_at),
            disabled_reason=cluster.disabled_reason
        )

        if flat:
            return result
        return {'cluster': result}

    @staticmethod
    def summary(cluster, flat=False):
        """Generic, non-detailed view of a cluster."""
        result = {
            'name': cluster.name,
            'binary': cluster.binary,
            'state': 'up' if cluster.is_up() else 'down',
            'status': 'disabled' if cluster.disabled else 'enabled',
        }
        if flat:
            return result
        return {'cluster': result}

    @classmethod
    def list(cls, clusters, detail=False):
        func = cls.detail if detail else cls.summary
        return {'clusters': [func(n, flat=True) for n in clusters]}
@@ -265,6 +265,10 @@ class InvalidGlobalAPIVersion(Invalid):
               "is %(min_ver)s and maximum is %(max_ver)s.")


+class MissingRequired(Invalid):
+    message = _("Missing required element '%(element)s' in request body.")
+
+
 class APIException(CinderException):
     message = _("Error while requesting %(service)s API.")

@@ -22,6 +22,7 @@ import webob.exc

 from cinder.api.contrib import services
 from cinder.api import extensions
+from cinder.api.openstack import api_version_request as api_version
 from cinder import context
 from cinder import exception
 from cinder import test

@@ -32,6 +33,7 @@ from cinder.tests.unit import fake_constants as fake
 fake_services_list = [
     {'binary': 'cinder-scheduler',
      'host': 'host1',
+     'cluster_name': None,
      'availability_zone': 'cinder',
      'id': 1,
      'disabled': True,

@@ -41,6 +43,7 @@ fake_services_list = [
      'modified_at': ''},
     {'binary': 'cinder-volume',
      'host': 'host1',
+     'cluster_name': None,
      'availability_zone': 'cinder',
      'id': 2,
      'disabled': True,

@@ -50,6 +53,7 @@ fake_services_list = [
      'modified_at': ''},
     {'binary': 'cinder-scheduler',
      'host': 'host2',
+     'cluster_name': 'cluster1',
      'availability_zone': 'cinder',
      'id': 3,
      'disabled': False,

@@ -59,6 +63,7 @@ fake_services_list = [
      'modified_at': ''},
     {'binary': 'cinder-volume',
      'host': 'host2',
+     'cluster_name': 'cluster1',
      'availability_zone': 'cinder',
      'id': 4,
      'disabled': True,

@@ -68,6 +73,7 @@ fake_services_list = [
      'modified_at': ''},
     {'binary': 'cinder-volume',
      'host': 'host2',
+     'cluster_name': 'cluster2',
      'availability_zone': 'cinder',
      'id': 5,
      'disabled': True,

@@ -77,6 +83,7 @@ fake_services_list = [
      'modified_at': datetime.datetime(2012, 10, 29, 13, 42, 5)},
     {'binary': 'cinder-volume',
      'host': 'host2',
+     'cluster_name': 'cluster2',
      'availability_zone': 'cinder',
      'id': 6,
      'disabled': False,

@@ -86,8 +93,9 @@ fake_services_list = [
      'modified_at': datetime.datetime(2012, 9, 18, 8, 1, 38)},
     {'binary': 'cinder-scheduler',
      'host': 'host2',
+     'cluster_name': None,
      'availability_zone': 'cinder',
-     'id': 6,
+     'id': 7,
      'disabled': False,
      'updated_at': None,
      'created_at': datetime.datetime(2012, 9, 18, 2, 46, 28),

@@ -98,36 +106,45 @@ fake_services_list = [

 class FakeRequest(object):
     environ = {"cinder.context": context.get_admin_context()}
-    GET = {}

+    def __init__(self, version='3.0', **kwargs):
+        self.GET = kwargs
+        self.headers = {'OpenStack-API-Version': 'volume ' + version}
+        self.api_version_request = api_version.APIVersionRequest(version)
+

 # NOTE(uni): deprecating service request key, binary takes precedence
 # Still keeping service key here for API compatibility sake.
-class FakeRequestWithService(object):
-    environ = {"cinder.context": context.get_admin_context()}
-    GET = {"service": "cinder-volume"}
+class FakeRequestWithService(FakeRequest):
+    def __init__(self, **kwargs):
+        kwargs.setdefault('service', 'cinder-volume')
+        super(FakeRequestWithService, self).__init__(**kwargs)


-class FakeRequestWithBinary(object):
-    environ = {"cinder.context": context.get_admin_context()}
-    GET = {"binary": "cinder-volume"}
+class FakeRequestWithBinary(FakeRequest):
+    def __init__(self, **kwargs):
+        kwargs.setdefault('binary', 'cinder-volume')
+        super(FakeRequestWithBinary, self).__init__(**kwargs)


-class FakeRequestWithHost(object):
-    environ = {"cinder.context": context.get_admin_context()}
-    GET = {"host": "host1"}
+class FakeRequestWithHost(FakeRequest):
+    def __init__(self, **kwargs):
+        kwargs.setdefault('host', 'host1')
+        super(FakeRequestWithHost, self).__init__(**kwargs)


 # NOTE(uni): deprecating service request key, binary takes precedence
 # Still keeping service key here for API compatibility sake.
-class FakeRequestWithHostService(object):
-    environ = {"cinder.context": context.get_admin_context()}
-    GET = {"host": "host1", "service": "cinder-volume"}
+class FakeRequestWithHostService(FakeRequestWithService):
+    def __init__(self, **kwargs):
+        kwargs.setdefault('host', 'host1')
+        super(FakeRequestWithHostService, self).__init__(**kwargs)


-class FakeRequestWithHostBinary(object):
-    environ = {"cinder.context": context.get_admin_context()}
-    GET = {"host": "host1", "binary": "cinder-volume"}
+class FakeRequestWithHostBinary(FakeRequestWithBinary):
+    def __init__(self, **kwargs):
+        kwargs.setdefault('host', 'host1')
+        super(FakeRequestWithHostBinary, self).__init__(**kwargs)


 def fake_service_get_all(context, **filters):
|
||||
]}
|
||||
self.assertEqual(response, res_dict)
|
||||
|
||||
def test_services_list_with_cluster_name(self):
|
||||
req = FakeRequest(version='3.7')
|
||||
res_dict = self.controller.index(req)
|
||||
|
||||
response = {'services': [{'binary': 'cinder-scheduler',
|
||||
'cluster': None,
|
||||
'host': 'host1', 'zone': 'cinder',
|
||||
'status': 'disabled', 'state': 'up',
|
||||
'updated_at': datetime.datetime(
|
||||
2012, 10, 29, 13, 42, 2)},
|
||||
{'binary': 'cinder-volume',
|
||||
'cluster': None,
|
||||
'host': 'host1', 'zone': 'cinder',
|
||||
'status': 'disabled', 'state': 'up',
|
||||
'updated_at': datetime.datetime(
|
||||
2012, 10, 29, 13, 42, 5)},
|
||||
{'binary': 'cinder-scheduler',
|
||||
'cluster': 'cluster1',
|
||||
'host': 'host2',
|
||||
'zone': 'cinder',
|
||||
'status': 'enabled', 'state': 'down',
|
||||
'updated_at': datetime.datetime(
|
||||
2012, 9, 19, 6, 55, 34)},
|
||||
{'binary': 'cinder-volume',
|
||||
'cluster': 'cluster1',
|
||||
'host': 'host2',
|
||||
'zone': 'cinder',
|
||||
'status': 'disabled', 'state': 'down',
|
||||
'updated_at': datetime.datetime(
|
||||
2012, 9, 18, 8, 3, 38)},
|
||||
{'binary': 'cinder-volume',
|
||||
'cluster': 'cluster2',
|
||||
'host': 'host2',
|
||||
'zone': 'cinder',
|
||||
'status': 'disabled', 'state': 'down',
|
||||
'updated_at': datetime.datetime(
|
||||
2012, 10, 29, 13, 42, 5)},
|
||||
{'binary': 'cinder-volume',
|
||||
'cluster': 'cluster2',
|
||||
'host': 'host2',
|
||||
'zone': 'cinder',
|
||||
'status': 'enabled', 'state': 'down',
|
||||
'updated_at': datetime.datetime(
|
||||
2012, 9, 18, 8, 3, 38)},
|
||||
{'binary': 'cinder-scheduler',
|
||||
'cluster': None,
|
||||
'host': 'host2',
|
||||
'zone': 'cinder',
|
||||
'status': 'enabled', 'state': 'down',
|
||||
'updated_at': None},
|
||||
]}
|
||||
self.assertEqual(response, res_dict)
|
||||
|
||||
def test_services_detail(self):
|
||||
self.ext_mgr.extensions['os-extended-services'] = True
|
||||
self.controller = services.ServiceController(self.ext_mgr)
|
||||
|
cinder/tests/unit/api/v3/test_cluster.py (new file, 251 lines)
@@ -0,0 +1,251 @@
# Copyright (c) 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import ddt
from iso8601 import iso8601
import mock

from cinder.api import extensions
from cinder.api.openstack import api_version_request as api_version
from cinder.api.v3 import clusters
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_cluster


CLUSTERS = [
    fake_cluster.fake_db_cluster(
        id=1,
        last_heartbeat=datetime.datetime(2016, 6, 1, 2, 46, 28),
        updated_at=datetime.datetime(2016, 6, 1, 2, 46, 28),
        created_at=datetime.datetime(2016, 6, 1, 2, 46, 28)),
    fake_cluster.fake_db_cluster(
        id=2, name='cluster2', num_hosts=2, num_down_hosts=1, disabled=True,
        updated_at=datetime.datetime(2016, 6, 1, 1, 46, 28),
        created_at=datetime.datetime(2016, 6, 1, 1, 46, 28))
]

CLUSTERS_ORM = [fake_cluster.fake_cluster_orm(**kwargs) for kwargs in CLUSTERS]

EXPECTED = [{'created_at': datetime.datetime(2016, 6, 1, 2, 46, 28),
             'disabled_reason': None,
             'last_heartbeat': datetime.datetime(2016, 6, 1, 2, 46, 28),
             'name': 'cluster_name',
             'binary': 'cinder-volume',
             'num_down_hosts': 0,
             'num_hosts': 0,
             'state': 'up',
             'status': 'enabled',
             'updated_at': datetime.datetime(2016, 6, 1, 2, 46, 28)},
            {'created_at': datetime.datetime(2016, 6, 1, 1, 46, 28),
             'disabled_reason': None,
             'last_heartbeat': '',
             'name': 'cluster2',
             'binary': 'cinder-volume',
             'num_down_hosts': 1,
             'num_hosts': 2,
             'state': 'down',
             'status': 'disabled',
             'updated_at': datetime.datetime(2016, 6, 1, 1, 46, 28)}]


class FakeRequest(object):
    def __init__(self, is_admin=True, version='3.7', **kwargs):
        self.GET = kwargs
        self.headers = {'OpenStack-API-Version': 'volume ' + version}
        self.api_version_request = api_version.APIVersionRequest(version)
        self.environ = {
            'cinder.context': context.RequestContext(user_id=None,
                                                     project_id=None,
                                                     is_admin=is_admin,
                                                     read_deleted='no',
                                                     overwrite=False)
        }


def fake_utcnow(with_timezone=False):
    tzinfo = iso8601.Utc() if with_timezone else None
    return datetime.datetime(2016, 6, 1, 2, 46, 30, tzinfo=tzinfo)


@ddt.ddt
@mock.patch('oslo_utils.timeutils.utcnow', fake_utcnow)
class ClustersTestCase(test.TestCase):
    """Test Case for Clusters."""
    LIST_FILTERS = ({}, {'is_up': True}, {'disabled': False}, {'num_hosts': 2},
                    {'num_down_hosts': 1}, {'binary': 'cinder-volume'},
                    {'is_up': True, 'disabled': False, 'num_hosts': 2,
                     'num_down_hosts': 1, 'binary': 'cinder-volume'})

    def setUp(self):
        super(ClustersTestCase, self).setUp()

        self.context = context.get_admin_context()
        self.ext_mgr = extensions.ExtensionManager()
        self.ext_mgr.extensions = {}
        self.controller = clusters.ClusterController(self.ext_mgr)

    @mock.patch('cinder.db.cluster_get_all', return_value=CLUSTERS_ORM)
    def _test_list(self, get_all_mock, detailed, filters, expected=None):
        req = FakeRequest(**filters)
        method = getattr(self.controller, 'detail' if detailed else 'index')
        clusters = method(req)

        filters = filters.copy()
        filters.setdefault('is_up', None)
        filters.setdefault('read_deleted', 'no')
        self.assertEqual(expected, clusters)
        get_all_mock.assert_called_once_with(
            req.environ['cinder.context'],
            get_services=False,
            services_summary=detailed,
            **filters)

    @ddt.data(*LIST_FILTERS)
    def test_index_detail(self, filters):
        """Verify that we get all clusters with detailed data."""
        expected = {'clusters': EXPECTED}
        self._test_list(detailed=True, filters=filters, expected=expected)

    @ddt.data(*LIST_FILTERS)
    def test_index_summary(self, filters):
        """Verify that we get all clusters with summary data."""
        expected = {'clusters': [{'name': 'cluster_name',
                                  'binary': 'cinder-volume',
                                  'state': 'up',
                                  'status': 'enabled'},
                                 {'name': 'cluster2',
                                  'binary': 'cinder-volume',
                                  'state': 'down',
                                  'status': 'disabled'}]}
        self._test_list(detailed=False, filters=filters, expected=expected)

    @ddt.data(True, False)
    def test_index_unauthorized(self, detailed):
        """Verify that unauthorized user can't list clusters."""
        self.assertRaises(exception.PolicyNotAuthorized,
                          self._test_list, detailed=detailed,
                          filters={'is_admin': False})

    @ddt.data(True, False)
    def test_index_wrong_version(self, detailed):
        """Verify that cluster listings are not available before 3.7."""
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self._test_list, detailed=detailed,
                          filters={'version': '3.5'})

    @mock.patch('cinder.db.sqlalchemy.api.cluster_get',
                return_value=CLUSTERS_ORM[0])
    def test_show(self, get_mock):
        req = FakeRequest()
        expected = {'cluster': EXPECTED[0]}
        cluster = self.controller.show(req, mock.sentinel.name,
                                       mock.sentinel.binary)
        self.assertEqual(expected, cluster)
        get_mock.assert_called_once_with(
            req.environ['cinder.context'],
            None,
            services_summary=True,
            name=mock.sentinel.name,
            binary=mock.sentinel.binary)

    def test_show_unauthorized(self):
        req = FakeRequest(is_admin=False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.show, req, 'name')

    def test_show_wrong_version(self):
        req = FakeRequest(version='3.5')
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.show, req, 'name')

    @mock.patch('cinder.db.sqlalchemy.api.cluster_update')
    @mock.patch('cinder.db.sqlalchemy.api.cluster_get',
                return_value=CLUSTERS_ORM[1])
    def test_update_enable(self, get_mock, update_mock):
        req = FakeRequest()
        expected = {'cluster': {'name': u'cluster2',
                                'binary': 'cinder-volume',
                                'state': 'down',
                                'status': 'enabled',
                                'disabled_reason': None}}
        res = self.controller.update(req, 'enable',
                                     {'name': mock.sentinel.name,
                                      'binary': mock.sentinel.binary})
        self.assertEqual(expected, res)
        ctxt = req.environ['cinder.context']
        get_mock.assert_called_once_with(ctxt,
                                         None, binary=mock.sentinel.binary,
                                         name=mock.sentinel.name)
        update_mock.assert_called_once_with(ctxt, get_mock.return_value.id,
                                            {'disabled': False,
                                             'disabled_reason': None})

    @mock.patch('cinder.db.sqlalchemy.api.cluster_update')
    @mock.patch('cinder.db.sqlalchemy.api.cluster_get',
                return_value=CLUSTERS_ORM[0])
    def test_update_disable(self, get_mock, update_mock):
        req = FakeRequest()
        disabled_reason = 'For testing'
        expected = {'cluster': {'name': u'cluster_name',
                                'state': 'up',
                                'binary': 'cinder-volume',
                                'status': 'disabled',
                                'disabled_reason': disabled_reason}}
        res = self.controller.update(req, 'disable',
                                     {'name': mock.sentinel.name,
                                      'binary': mock.sentinel.binary,
                                      'disabled_reason': disabled_reason})
        self.assertEqual(expected, res)
        ctxt = req.environ['cinder.context']
        get_mock.assert_called_once_with(ctxt,
                                         None, binary=mock.sentinel.binary,
                                         name=mock.sentinel.name)
        update_mock.assert_called_once_with(
            ctxt, get_mock.return_value.id,
            {'disabled': True, 'disabled_reason': disabled_reason})

    def test_update_wrong_action(self):
        req = FakeRequest()
        self.assertRaises(exception.NotFound, self.controller.update, req,
                          'action', {})

    @ddt.data('enable', 'disable')
    def test_update_missing_name(self, action):
        req = FakeRequest()
        self.assertRaises(exception.MissingRequired, self.controller.update,
                          req, action, {'binary': mock.sentinel.binary})

    def test_update_wrong_disabled_reason(self):
        req = FakeRequest()
        self.assertRaises(exception.InvalidInput, self.controller.update, req,
                          'disable', {'name': mock.sentinel.name,
                                      'disabled_reason': ' '})

    @ddt.data('enable', 'disable')
    def test_update_unauthorized(self, action):
        req = FakeRequest(is_admin=False)
        self.assertRaises(exception.PolicyNotAuthorized,
                          self.controller.update, req, action, {})

    @ddt.data('enable', 'disable')
    def test_update_wrong_version(self, action):
        req = FakeRequest(version='3.5')
        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.update, req, action, {})
@@ -116,5 +116,9 @@

     "message:delete": "rule:admin_or_owner",
     "message:get": "rule:admin_or_owner",
-    "message:get_all": "rule:admin_or_owner"
+    "message:get_all": "rule:admin_or_owner",
+
+    "clusters:get": "rule:admin_api",
+    "clusters:get_all": "rule:admin_api",
+    "clusters:update": "rule:admin_api"
 }
@@ -111,5 +111,9 @@
     "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api",
     "message:delete": "rule:admin_or_owner",
     "message:get": "rule:admin_or_owner",
-    "message:get_all": "rule:admin_or_owner"
+    "message:get_all": "rule:admin_or_owner",
+
+    "clusters:get": "rule:admin_api",
+    "clusters:get_all": "rule:admin_api",
+    "clusters:update": "rule:admin_api"
 }
@@ -14,3 +14,7 @@ features:
     listings."
   - "HA A-A: Added cluster subcommand in manage command to list, remove, and
     rename clusters."
+  - "HA A-A: Added clusters API endpoints for cluster related operations (index,
+    detail, show, enable/disable). Index and detail accept filtering by
+    `name`, `binary`, `disabled`, `num_hosts`, `num_down_hosts`, and up/down
+    status (`is_up`) as URL parameters. Also added their respective policies."