Merge "Allow scheduler pool information to be retrieved"
commit 6643ce048a

cinder/api/contrib/scheduler_stats.py  (new file, 70 lines)
@ -0,0 +1,70 @@
# Copyright (c) 2014 eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""The Scheduler Stats extension"""

from cinder.api import extensions
from cinder.api.openstack import wsgi
from cinder.api.views import scheduler_stats as scheduler_stats_view
from cinder.openstack.common import log as logging
from cinder.scheduler import rpcapi


LOG = logging.getLogger(__name__)


def authorize(context, action_name):
    action = 'scheduler_stats:%s' % action_name
    extensions.extension_authorizer('scheduler', action)(context)


class SchedulerStatsController(wsgi.Controller):
    """The Scheduler Stats controller for the OpenStack API."""

    _view_builder_class = scheduler_stats_view.ViewBuilder

    def __init__(self):
        self.scheduler_api = rpcapi.SchedulerAPI()
        super(SchedulerStatsController, self).__init__()

    def get_pools(self, req):
        """List all active pools in scheduler."""
        context = req.environ['cinder.context']
        authorize(context, 'get_pools')

        # TODO(zhiteng) Add filters support
        detail = req.params.get('detail', False)
        pools = self.scheduler_api.get_pools(context, filters=None)

        return self._view_builder.pools(req, pools, detail)


class Scheduler_stats(extensions.ExtensionDescriptor):
    """Scheduler stats support."""

    name = "Scheduler_stats"
    alias = "scheduler-stats"
    namespace = "http://docs.openstack.org/volume/ext/scheduler-stats/api/v1"
    updated = "2014-09-07T00:00:00+00:00"

    def get_resources(self):
        resources = []
        res = extensions.ResourceExtension(
            Scheduler_stats.alias,
            SchedulerStatsController(),
            collection_actions={"get_pools": "GET"})

        resources.append(res)

        return resources
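The extension above registers get_pools as a GET collection action on the scheduler-stats resource, so the pool listing becomes reachable over the volume v2 API at /scheduler-stats/get_pools. Below is a minimal sketch of calling it with the requests library; the endpoint host and port, project id, and token are placeholders and not part of this change.

# Hedged sketch: assumes a Cinder v2 endpoint and an admin token; the host,
# project id and token are placeholders, not values from this commit.
import requests

ENDPOINT = "http://controller:8776/v2/PROJECT_ID"   # assumed endpoint
HEADERS = {"X-Auth-Token": "ADMIN_TOKEN"}           # assumed admin token

# Summary listing: one entry per pool, name only.
summary = requests.get(ENDPOINT + "/scheduler-stats/get_pools",
                       headers=HEADERS).json()

# Detailed listing: also carries the capabilities dict reported by each
# backend; which capability keys appear depends on the backend driver.
detail = requests.get(ENDPOINT + "/scheduler-stats/get_pools",
                      params={"detail": "True"},
                      headers=HEADERS).json()

for pool in detail["pools"]:
    print(pool["name"])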
cinder/api/views/scheduler_stats.py  (new file, 53 lines)
@ -0,0 +1,53 @@
# Copyright (C) 2014 eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinder.api import common


class ViewBuilder(common.ViewBuilder):
    """Model scheduler-stats API responses as a python dictionary."""

    _collection_name = "scheduler-stats"

    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()

    def summary(self, request, pool):
        """Summary view of a single pool."""
        return {
            'pool': {
                'name': pool.get('name'),
            }
        }

    def detail(self, request, pool):
        """Detailed view of a single pool."""
        return {
            'pool': {
                'name': pool.get('name'),
                'capabilities': pool.get('capabilities'),
            }
        }

    def pools(self, request, pools, detail):
        """View of a list of pools seen by the scheduler."""
        if detail:
            plist = [self.detail(request, pool)['pool'] for pool in pools]
        else:
            plist = [self.summary(request, pool)['pool'] for pool in pools]
        pools_dict = dict(pools=plist)

        return pools_dict
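For reference, a small sketch of how the view builder shapes its output. It assumes a Cinder source tree on the import path; the pool data is made up for illustration, and the request argument is unused by these views.

# Hedged sketch: assumes the cinder package is importable; pool data is
# invented for illustration only.
from cinder.api.views import scheduler_stats as scheduler_stats_view

builder = scheduler_stats_view.ViewBuilder()
pools = [{'name': 'host1#pool_a',
          'capabilities': {'free_capacity_gb': 100}}]

# Summary keeps only the pool name.
print(builder.pools(None, pools, detail=False))
# {'pools': [{'name': 'host1#pool_a'}]}

# Detail passes the capabilities dict through unchanged.
print(builder.pools(None, pools, detail=True))
# {'pools': [{'name': 'host1#pool_a',
#             'capabilities': {'free_capacity_gb': 100}}]}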
@ -98,3 +98,8 @@ class Scheduler(object):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_(
            "Must implement schedule_create_consistencygroup"))

    def get_pools(self, context, filters):
        """Must override schedule method for scheduler to work."""
        raise NotImplementedError(_(
            "Must implement schedule_get_pools"))
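The Scheduler base class now declares get_pools as part of the driver interface, so any driver, in-tree or out-of-tree, is expected to override it. Below is a purely illustrative subclass; the class name and the hard-coded pool are invented for the example and are not part of this change.

# Illustrative only: a toy driver overriding the new hook. Real drivers
# would report their own host/pool state instead of a hard-coded entry.
from cinder.scheduler import driver


class StaticPoolScheduler(driver.Scheduler):
    """Toy scheduler driver that reports a single hard-coded pool."""

    def get_pools(self, context, filters):
        # 'filters' is accepted but ignored, matching the in-tree
        # FilterScheduler behaviour (filter support is still a TODO).
        return [{'name': 'fake_host#fake_pool',
                 'capabilities': {'total_capacity_gb': 1,
                                  'free_capacity_gb': 1}}]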
@ -168,6 +168,10 @@ class FilterScheduler(driver.Scheduler):
        top_host = self._choose_top_host(weighed_hosts, request_spec)
        return top_host.obj

    def get_pools(self, context, filters):
        # TODO(zhiteng) Add filters support
        return self.host_manager.get_pools(context)

    def _post_select_populate_filter_properties(self, filter_properties,
                                                host_state):
        """Add additional information to the filter properties after a host has
@ -486,3 +486,18 @@ class HostManager(object):
                all_pools[pool_key] = pool

        return all_pools.itervalues()

    def get_pools(self, context):
        """Return a list of dicts, one per pool HostManager knows about."""

        all_pools = []
        for host, state in self.host_state_map.items():
            for key in state.pools:
                pool = state.pools[key]
                # use host.pool_name to make sure key is unique
                pool_key = vol_utils.append_host(host, pool.pool_name)
                new_pool = dict(name=pool_key)
                new_pool.update(dict(capabilities=pool.capabilities))
                all_pools.append(new_pool)

        return all_pools
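get_pools builds each pool name with cinder.volume.utils.append_host, which joins the backend host and pool name with '#'. The sketch below is a minimal re-implementation written only to document that naming convention, which the tests later in this change rely on ('host2@back1#BBB' and so on); the real helper lives in Cinder's volume utils module.

# Minimal re-implementation for illustration only; it documents the
# 'host@backend#pool' key convention used by HostManager.get_pools above.
def append_host(host, pool):
    """Join a backend host and pool name into a single pool key."""
    if not host or not pool:
        return host
    return '%s#%s' % (host, pool)

print(append_host('host2@back1', 'BBB'))  # host2@back1#BBB
print(append_host('host1', None))         # host1 (no pool reported)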
@ -53,7 +53,7 @@ LOG = logging.getLogger(__name__)
 class SchedulerManager(manager.Manager):
     """Chooses a host to create volumes."""
 
-    RPC_API_VERSION = '1.6'
+    RPC_API_VERSION = '1.7'
 
     target = messaging.Target(version=RPC_API_VERSION)
@ -240,6 +240,10 @@ class SchedulerManager(manager.Manager):
        volume_rpcapi.VolumeAPI().manage_existing(context, volume_ref,
                                                   request_spec.get('ref'))

    def get_pools(self, context, filters=None):
        """Get active pools from scheduler's cache."""
        return self.driver.get_pools(context, filters)

    def _set_volume_state_and_notify(self, method, updates, context, ex,
                                     request_spec, msg=None):
        # TODO(harlowja): move into a task that just does this later.
@ -39,6 +39,7 @@ class SchedulerAPI(object):
        1.4 - Add retype method
        1.5 - Add manage_existing method
        1.6 - Add create_consistencygroup method
        1.7 - Add get_active_pools method
    '''

    RPC_API_VERSION = '1.0'
@ -47,7 +48,7 @@ class SchedulerAPI(object):
         super(SchedulerAPI, self).__init__()
         target = messaging.Target(topic=CONF.scheduler_topic,
                                   version=self.RPC_API_VERSION)
-        self.client = rpc.get_client(target, version_cap='1.6')
+        self.client = rpc.get_client(target, version_cap='1.7')
 
     def create_consistencygroup(self, ctxt, topic, group_id,
                                 request_spec_list=None,
@ -114,6 +115,11 @@ class SchedulerAPI(object):
                          request_spec=request_spec_p,
                          filter_properties=filter_properties)

    def get_pools(self, ctxt, filters=None):
        cctxt = self.client.prepare(version='1.7')
        return cctxt.call(ctxt, 'get_pools',
                          filters=filters)

    def update_service_capabilities(self, ctxt,
                                    service_name, host,
                                    capabilities):
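On the client side, get_pools is a synchronous RPC call pinned to version 1.7. A small sketch of invoking it from inside a Cinder process follows; it assumes Cinder is configured and the oslo.messaging transport is initialized (for example, inside an existing Cinder service) and that a scheduler service is running.

# Hedged sketch: requires a configured cinder environment with the RPC
# layer set up and a running cinder-scheduler.
from cinder import context
from cinder.scheduler import rpcapi as scheduler_rpcapi

ctxt = context.get_admin_context()
pools = scheduler_rpcapi.SchedulerAPI().get_pools(ctxt, filters=None)
for pool in pools:
    print(pool['name'])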
cinder/tests/api/contrib/test_scheduler_stats.py  (new file, 110 lines)
@ -0,0 +1,110 @@
# Copyright 2013 eBay Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import mock

from cinder.api.contrib import scheduler_stats
from cinder import context
from cinder import test
from cinder.tests.api import fakes


def schedule_rpcapi_get_pools(self, context, filters=None):
    all_pools = []
    pool1 = dict(name='pool1',
                 capabilities=dict(
                     total_capacity=1024, free_capacity=100,
                     volume_backend_name='pool1', reserved_percentage=0,
                     driver_version='1.0.0', storage_protocol='iSCSI',
                     QoS_support='False', updated=None))
    all_pools.append(pool1)
    pool2 = dict(name='pool2',
                 capabilities=dict(
                     total_capacity=512, free_capacity=200,
                     volume_backend_name='pool2', reserved_percentage=0,
                     driver_version='1.0.1', storage_protocol='iSER',
                     QoS_support='True', updated=None))
    all_pools.append(pool2)

    return all_pools


@mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.get_pools',
            schedule_rpcapi_get_pools)
class SchedulerStatsAPITest(test.TestCase):
    def setUp(self):
        super(SchedulerStatsAPITest, self).setUp()
        self.flags(host='fake')
        self.controller = scheduler_stats.SchedulerStatsController()
        self.ctxt = context.RequestContext('admin', 'fake', True)

    def test_get_pools_summary(self):
        req = fakes.HTTPRequest.blank('/v2/fake/scheduler_stats')
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.get_pools(req)

        self.assertEqual(2, len(res['pools']))

        expected = {
            'pools': [
                {
                    'name': 'pool1',
                },
                {
                    'name': 'pool2',
                }
            ]
        }

        self.assertDictMatch(res, expected)

    def test_get_pools_detail(self):
        req = fakes.HTTPRequest.blank('/v2/fake/scheduler_stats?detail=True')
        req.environ['cinder.context'] = self.ctxt
        res = self.controller.get_pools(req)

        self.assertEqual(2, len(res['pools']))

        expected = {
            'pools': [
                {
                    'name': 'pool1',
                    'capabilities': {
                        'updated': None,
                        'total_capacity': 1024,
                        'free_capacity': 100,
                        'volume_backend_name': 'pool1',
                        'reserved_percentage': 0,
                        'driver_version': '1.0.0',
                        'storage_protocol': 'iSCSI',
                        'QoS_support': 'False', }
                },
                {
                    'name': 'pool2',
                    'capabilities': {
                        'updated': None,
                        'total_capacity': 512,
                        'free_capacity': 200,
                        'volume_backend_name': 'pool2',
                        'reserved_percentage': 0,
                        'driver_version': '1.0.1',
                        'storage_protocol': 'iSER',
                        'QoS_support': 'True', }
                }
            ]
        }

        self.assertDictMatch(res, expected)
@ -89,5 +89,7 @@
     "consistencygroup:create_cgsnapshot" : "",
     "consistencygroup:delete_cgsnapshot": "",
     "consistencygroup:get_cgsnapshot": "",
-    "consistencygroup:get_all_cgsnapshots": ""
+    "consistencygroup:get_all_cgsnapshots": "",
+
+    "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
 }
@ -188,6 +188,89 @@ class HostManagerTestCase(test.TestCase):
            self.assertEqual(host_state_map[host].service,
                             volume_node)

    @mock.patch('cinder.db.service_get_all_by_topic')
    @mock.patch('cinder.utils.service_is_up')
    def test_get_pools(self, _mock_service_is_up,
                       _mock_service_get_all_by_topic):
        context = 'fake_context'

        services = [
            dict(id=1, host='host1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
            dict(id=2, host='host2@back1', topic='volume', disabled=False,
                 availability_zone='zone1', updated_at=timeutils.utcnow()),
            dict(id=3, host='host2@back2', topic='volume', disabled=False,
                 availability_zone='zone2', updated_at=timeutils.utcnow()),
        ]

        mocked_service_states = {
            'host1': dict(volume_backend_name='AAA',
                          total_capacity_gb=512, free_capacity_gb=200,
                          timestamp=None, reserved_percentage=0),
            'host2@back1': dict(volume_backend_name='BBB',
                                total_capacity_gb=256, free_capacity_gb=100,
                                timestamp=None, reserved_percentage=0),
            'host2@back2': dict(volume_backend_name='CCC',
                                total_capacity_gb=10000, free_capacity_gb=700,
                                timestamp=None, reserved_percentage=0),
        }

        _mock_service_get_all_by_topic.return_value = services
        _mock_service_is_up.return_value = True
        _mock_warning = mock.Mock()
        host_manager.LOG.warn = _mock_warning

        with mock.patch.dict(self.host_manager.service_states,
                             mocked_service_states):
            # call get_all_host_states to populate host_state_map
            self.host_manager.get_all_host_states(context)

            res = self.host_manager.get_pools(context)

            # check if get_pools returns all 3 pools
            self.assertEqual(3, len(res))

            expected = [
                {
                    'name': 'host1#AAA',
                    'capabilities': {
                        'timestamp': None,
                        'volume_backend_name': 'AAA',
                        'free_capacity_gb': 200,
                        'driver_version': None,
                        'total_capacity_gb': 512,
                        'reserved_percentage': 0,
                        'vendor_name': None,
                        'storage_protocol': None},
                },
                {
                    'name': 'host2@back1#BBB',
                    'capabilities': {
                        'timestamp': None,
                        'volume_backend_name': 'BBB',
                        'free_capacity_gb': 100,
                        'driver_version': None,
                        'total_capacity_gb': 256,
                        'reserved_percentage': 0,
                        'vendor_name': None,
                        'storage_protocol': None},
                },
                {
                    'name': 'host2@back2#CCC',
                    'capabilities': {
                        'timestamp': None,
                        'volume_backend_name': 'CCC',
                        'free_capacity_gb': 700,
                        'driver_version': None,
                        'total_capacity_gb': 10000,
                        'reserved_percentage': 0,
                        'vendor_name': None,
                        'storage_protocol': None},
                }
            ]
            self.assertEqual(len(expected), len(res))
            self.assertEqual(sorted(expected), sorted(res))


class HostStateTestCase(test.TestCase):
    """Test case for HostState class."""
@ -123,3 +123,9 @@ class SchedulerRpcAPITestCase(test.TestCase):
                                 request_spec='fake_request_spec',
                                 filter_properties='filter_properties',
                                 version='1.5')

    def test_get_pools(self):
        self._test_scheduler_api('get_pools',
                                 rpc_method='call',
                                 filters=None,
                                 version='1.7')
@ -74,5 +74,7 @@
     "consistencygroup:create_cgsnapshot" : "",
     "consistencygroup:delete_cgsnapshot": "",
     "consistencygroup:get_cgsnapshot": "",
-    "consistencygroup:get_all_cgsnapshots": ""
+    "consistencygroup:get_all_cgsnapshots": "",
+
+    "scheduler_extension:scheduler_stats:get_pools" : "rule:admin_api"
 }