diff --git a/cinder/api/contrib/resource_common_manage.py b/cinder/api/contrib/resource_common_manage.py
index ebf20332264..ca11172c860 100644
--- a/cinder/api/contrib/resource_common_manage.py
+++ b/cinder/api/contrib/resource_common_manage.py
@@ -21,11 +21,7 @@ def get_manageable_resources(req, is_detail, function_get_manageable,
                              view_builder):
     context = req.environ['cinder.context']
     params = req.params.copy()
-    host = params.get('host')
-    if host is None:
-        raise exception.InvalidHost(
-            reason=_("Host must be specified in query parameters"))
-
+    cluster_name, host = common.get_cluster_host(req, params, '3.17')
     marker, limit, offset = common.get_pagination_params(params)
     sort_keys, sort_dirs = common.get_sort_params(params,
                                                   default_key='reference')
@@ -43,9 +39,9 @@ def get_manageable_resources(req, is_detail, function_get_manageable,
         msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs)
         raise exception.InvalidParameterValue(err=msg)
 
-    resources = function_get_manageable(context, host, marker=marker,
-                                        limit=limit, offset=offset,
-                                        sort_keys=sort_keys,
+    resources = function_get_manageable(context, host, cluster_name,
+                                        marker=marker, limit=limit,
+                                        offset=offset, sort_keys=sort_keys,
                                         sort_dirs=sort_dirs)
 
     resource_count = len(resources)
diff --git a/cinder/api/openstack/api_version_request.py b/cinder/api/openstack/api_version_request.py
index 6f118231882..ae36fba8b7d 100644
--- a/cinder/api/openstack/api_version_request.py
+++ b/cinder/api/openstack/api_version_request.py
@@ -65,6 +65,7 @@ REST_API_VERSION_HISTORY = """
     * 3.15 - Inject the response's `Etag` header to avoid the lost update
              problem with volume metadata.
     * 3.16 - Migrate volume now supports cluster
+    * 3.17 - Getting manageable volumes and snapshots now accepts cluster.
 """

 # The minimum and maximum versions of the API supported
@@ -72,7 +73,7 @@ REST_API_VERSION_HISTORY = """
 # minimum version of the API supported.
 # Explicitly using /v1 or /v2 enpoints will still work
 _MIN_API_VERSION = "3.0"
-_MAX_API_VERSION = "3.16"
+_MAX_API_VERSION = "3.17"
 _LEGACY_API_VERSION1 = "1.0"
 _LEGACY_API_VERSION2 = "2.0"

diff --git a/cinder/api/openstack/rest_api_version_history.rst b/cinder/api/openstack/rest_api_version_history.rst
index ab0309901f4..33adba020a0 100644
--- a/cinder/api/openstack/rest_api_version_history.rst
+++ b/cinder/api/openstack/rest_api_version_history.rst
@@ -200,3 +200,9 @@ user documentation.
   that specific cluster. Only ``host`` or ``cluster`` can be provided.

   Creating a managed volume also supports the cluster parameter.
+
+3.17
+----
+  os-snapshot-manage and os-volume-manage now support the ``cluster``
+  parameter on listings (summary and detailed). The ``cluster`` and ``host``
+  parameters are mutually exclusive; only one of them should be provided.
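Note: the new `common.get_cluster_host` helper is what enforces the exclusivity described in the 3.17 history entry above. Below is a minimal standalone sketch of the expected behavior; the real helper lives in cinder/api/common.py and also checks the request microversion, so the simplified signature and names here are assumptions for illustration only.

    # Illustrative approximation of common.get_cluster_host() semantics.
    class InvalidInput(Exception):
        pass

    def get_cluster_host(params, supports_cluster):
        """Return (cluster_name, host), enforcing mutual exclusivity."""
        cluster_name = params.get('cluster') if supports_cluster else None
        host = params.get('host')
        if bool(cluster_name) == bool(host):
            # Both or neither provided -> 400 Bad Request at the API layer.
            raise InvalidInput('Provide exactly one of "cluster" or "host".')
        return cluster_name, host

    # A 3.17 request listing manageable resources by cluster:
    print(get_cluster_host({'cluster': 'mycluster'}, supports_cluster=True))
    # ('mycluster', None)

This matches the new tests further down: requests with neither parameter, or with both, get a 400 on every microversion that reaches the helper.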
diff --git a/cinder/api/v3/resource_common_manage.py b/cinder/api/v3/resource_common_manage.py
new file mode 100644
index 00000000000..4265fca3df7
--- /dev/null
+++ b/cinder/api/v3/resource_common_manage.py
@@ -0,0 +1,83 @@
+# Copyright (c) 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from cinder.api import common
+from cinder.api import extensions
+from cinder.api.openstack import wsgi
+from cinder import exception
+from cinder.i18n import _
+
+
+class ManageResource(object):
+    """Mixin class for v3 of ManageVolume and ManageSnapshot.
+
+    It requires that any class inheriting from this one has `volume_api` and
+    `_list_manageable_view` attributes.
+    """
+    VALID_SORT_KEYS = {'reference', 'size'}
+    VALID_SORT_DIRS = {'asc', 'desc'}
+
+    def _set_resource_type(self, resource):
+        self._authorizer = extensions.extension_authorizer(
+            resource, 'list_manageable')
+        self.get_manageable = getattr(self.volume_api,
+                                      'get_manageable_%ss' % resource)
+
+    def _ensure_min_version(self, req, allowed_version):
+        version = req.api_version_request
+        if not version.matches(allowed_version, None):
+            raise exception.VersionNotFoundForAPIMethod(version=version)
+
+    def _get_resources(self, req, is_detail):
+        self._ensure_min_version(req, '3.8')
+
+        context = req.environ['cinder.context']
+        self._authorizer(context)
+
+        params = req.params.copy()
+        cluster_name, host = common.get_cluster_host(req, params, '3.17')
+        marker, limit, offset = common.get_pagination_params(params)
+        sort_keys, sort_dirs = common.get_sort_params(params,
+                                                      default_key='reference')
+
+        # These parameters are generally validated at the DB layer, but in
+        # this case sorting is not done by the DB.
+        invalid_keys = set(sort_keys).difference(self.VALID_SORT_KEYS)
+        if invalid_keys:
+            msg = _("Invalid sort keys passed: %s") % ', '.join(invalid_keys)
+            raise exception.InvalidParameterValue(err=msg)
+
+        invalid_dirs = set(sort_dirs).difference(self.VALID_SORT_DIRS)
+        if invalid_dirs:
+            msg = _("Invalid sort dirs passed: %s") % ', '.join(invalid_dirs)
+            raise exception.InvalidParameterValue(err=msg)
+
+        resources = self.get_manageable(context, host, cluster_name,
+                                        marker=marker, limit=limit,
+                                        offset=offset, sort_keys=sort_keys,
+                                        sort_dirs=sort_dirs)
+        view_builder = getattr(self._list_manageable_view,
+                               'detail_list' if is_detail else 'summary_list')
+        return view_builder(req, resources, len(resources))
+
+    @wsgi.extends
+    def index(self, req):
+        """Returns a summary list of resources available to manage."""
+        return self._get_resources(req, False)
+
+    @wsgi.extends
+    def detail(self, req):
+        """Returns a detailed list of resources available to manage."""
+        return self._get_resources(req, True)
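The `_set_resource_type` trick above is what lets one mixin serve both controllers: it builds the API method name from the resource type string. A self-contained toy model of that dispatch (the fake class names here are not Cinder code):

    class FakeVolumeAPI(object):
        def get_manageable_volumes(self, *args, **kwargs):
            return ['volume entries']

        def get_manageable_snapshots(self, *args, **kwargs):
            return ['snapshot entries']

    class ManageResourceSketch(object):
        volume_api = FakeVolumeAPI()

        def _set_resource_type(self, resource):
            # 'volume' -> get_manageable_volumes,
            # 'snapshot' -> get_manageable_snapshots
            self.get_manageable = getattr(self.volume_api,
                                          'get_manageable_%ss' % resource)

    controller = ManageResourceSketch()
    controller._set_resource_type('snapshot')
    print(controller.get_manageable())  # ['snapshot entries']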
diff --git a/cinder/api/v3/snapshot_manage.py b/cinder/api/v3/snapshot_manage.py
index 4cd566712bf..4c842e32158 100644
--- a/cinder/api/v3/snapshot_manage.py
+++ b/cinder/api/v3/snapshot_manage.py
@@ -14,32 +14,20 @@
 from cinder.api.contrib import snapshot_manage as snapshot_manage_v2
 from cinder.api.openstack import wsgi
-from cinder import exception
+from cinder.api.v3 import resource_common_manage as common


-class SnapshotManageController(snapshot_manage_v2.SnapshotManageController):
-    def _ensure_min_version(self, req, allowed_version):
-        version = req.api_version_request
-        if not version.matches(allowed_version, None):
-            raise exception.VersionNotFoundForAPIMethod(version=version)
+class SnapshotManageController(common.ManageResource,
+                               snapshot_manage_v2.SnapshotManageController):
+    def __init__(self, *args, **kwargs):
+        super(SnapshotManageController, self).__init__(*args, **kwargs)
+        self._set_resource_type('snapshot')

     @wsgi.response(202)
     def create(self, req, body):
         self._ensure_min_version(req, "3.8")
         return super(SnapshotManageController, self).create(req, body)

-    @wsgi.extends
-    def index(self, req):
-        """Returns a summary list of snapshots available to manage."""
-        self._ensure_min_version(req, "3.8")
-        return super(SnapshotManageController, self).index(req)
-
-    @wsgi.extends
-    def detail(self, req):
-        """Returns a detailed list of snapshots available to manage."""
-        self._ensure_min_version(req, "3.8")
-        return super(SnapshotManageController, self).detail(req)
-

 def create_resource():
     return wsgi.Resource(SnapshotManageController())
diff --git a/cinder/api/v3/volume_manage.py b/cinder/api/v3/volume_manage.py
index 9f0133e3537..d494c7576ef 100644
--- a/cinder/api/v3/volume_manage.py
+++ b/cinder/api/v3/volume_manage.py
@@ -14,32 +14,20 @@
 from cinder.api.contrib import volume_manage as volume_manage_v2
 from cinder.api.openstack import wsgi
-from cinder import exception
+from cinder.api.v3 import resource_common_manage as common


-class VolumeManageController(volume_manage_v2.VolumeManageController):
-    def _ensure_min_version(self, req, allowed_version):
-        version = req.api_version_request
-        if not version.matches(allowed_version, None):
-            raise exception.VersionNotFoundForAPIMethod(version=version)
+class VolumeManageController(common.ManageResource,
+                             volume_manage_v2.VolumeManageController):
+    def __init__(self, *args, **kwargs):
+        super(VolumeManageController, self).__init__(*args, **kwargs)
+        self._set_resource_type('volume')

     @wsgi.response(202)
     def create(self, req, body):
         self._ensure_min_version(req, "3.8")
         return super(VolumeManageController, self).create(req, body)

-    @wsgi.extends
-    def index(self, req):
-        """Returns a summary list of volumes available to manage."""
-        self._ensure_min_version(req, "3.8")
-        return super(VolumeManageController, self).index(req)
-
-    @wsgi.extends
-    def detail(self, req):
-        """Returns a detailed list of volumes available to manage."""
-        self._ensure_min_version(req, "3.8")
-        return super(VolumeManageController, self).detail(req)
-

 def create_resource():
     return wsgi.Resource(VolumeManageController())
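Both controllers now put `ManageResource` first in their base-class list, and that ordering matters: Python's MRO resolves `index`/`detail` to the cluster-aware mixin versions while `create` still comes from the v2 controllers. A runnable toy example of the pattern (the class names are illustrative, not Cinder's):

    class V2Controller(object):
        def index(self):
            return 'v2 host-only listing'

        def create(self):
            return 'v2 create'

    class ManageResourceMixin(object):
        def index(self):
            return 'v3 cluster-aware listing'

    class V3Controller(ManageResourceMixin, V2Controller):
        pass

    c = V3Controller()
    print(c.index())   # 'v3 cluster-aware listing' (from the mixin)
    print(c.create())  # 'v2 create' (inherited from the v2 controller)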
""" - snapshots = db.snapshot_get_all(context, search_opts, marker, limit, + snapshots = db.snapshot_get_all(context, filters, marker, limit, sort_keys, sort_dirs, offset) expected_attrs = Snapshot._get_expected_attrs(context) return base.obj_make_list(context, cls(context), objects.Snapshot, diff --git a/cinder/tests/unit/api/contrib/test_snapshot_manage.py b/cinder/tests/unit/api/contrib/test_snapshot_manage.py index 65daed472a8..bdf4cbea9e5 100644 --- a/cinder/tests/unit/api/contrib/test_snapshot_manage.py +++ b/cinder/tests/unit/api/contrib/test_snapshot_manage.py @@ -254,7 +254,7 @@ class SnapshotManageTest(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(jsonutils.loads(res.body), exp) mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', limit=CONF.osapi_max_limit, + self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit, marker=None, offset=0, sort_dirs=['desc'], sort_keys=['reference']) @@ -277,8 +277,8 @@ class SnapshotManageTest(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(jsonutils.loads(res.body), exp) mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', limit=10, marker='1234', offset=4, - sort_dirs=['asc'], sort_keys=['reference']) + self._admin_ctxt, 'fakehost', None, limit=10, marker='1234', + offset=4, sort_dirs=['asc'], sort_keys=['reference']) @mock.patch('cinder.objects.service.Service.is_up', return_value=True) @mock.patch('cinder.db.sqlalchemy.api.service_get') diff --git a/cinder/tests/unit/api/contrib/test_volume_manage.py b/cinder/tests/unit/api/contrib/test_volume_manage.py index a7862ba6ca1..5cdb9af14fa 100644 --- a/cinder/tests/unit/api/contrib/test_volume_manage.py +++ b/cinder/tests/unit/api/contrib/test_volume_manage.py @@ -410,8 +410,8 @@ class VolumeManageTest(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(exp, jsonutils.loads(res.body)) mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', limit=10, marker='1234', offset=4, - sort_dirs=['asc'], sort_keys=['reference']) + self._admin_ctxt, 'fakehost', None, limit=10, marker='1234', + offset=4, sort_dirs=['asc'], sort_keys=['reference']) @mock.patch('cinder.volume.api.API.get_manageable_volumes', wraps=api_get_manageable_volumes) @@ -428,7 +428,7 @@ class VolumeManageTest(test.TestCase): self.assertEqual(200, res.status_int) self.assertEqual(exp, jsonutils.loads(res.body)) mock_api_manageable.assert_called_once_with( - self._admin_ctxt, 'fakehost', limit=CONF.osapi_max_limit, + self._admin_ctxt, 'fakehost', None, limit=CONF.osapi_max_limit, marker=None, offset=0, sort_dirs=['desc'], sort_keys=['reference']) diff --git a/cinder/tests/unit/api/v3/test_snapshot_manage.py b/cinder/tests/unit/api/v3/test_snapshot_manage.py index 56ce469a60c..f797bfe91bc 100644 --- a/cinder/tests/unit/api/v3/test_snapshot_manage.py +++ b/cinder/tests/unit/api/v3/test_snapshot_manage.py @@ -12,7 +12,9 @@ # License for the specific language governing permissions and limitations # under the License. 
diff --git a/cinder/tests/unit/api/v3/test_snapshot_manage.py b/cinder/tests/unit/api/v3/test_snapshot_manage.py
index 56ce469a60c..f797bfe91bc 100644
--- a/cinder/tests/unit/api/v3/test_snapshot_manage.py
+++ b/cinder/tests/unit/api/v3/test_snapshot_manage.py
@@ -12,7 +12,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import ddt
 import mock
+from oslo_config import cfg
 from oslo_serialization import jsonutils
 try:
     from urllib import urlencode
@@ -22,6 +24,7 @@ import webob

 from cinder.api.v3 import router as router_v3
 from cinder import context
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.api.contrib import test_snapshot_manage as test_contrib
 from cinder.tests.unit.api import fakes
@@ -29,6 +32,9 @@ from cinder.tests.unit import fake_constants as fake
 from cinder.tests.unit import fake_service


+CONF = cfg.CONF
+
+
 def app():
     # no auth, just let environ['cinder.context'] pass through
     api = router_v3.APIRouter()
@@ -37,6 +43,7 @@ def app():
     return mapper


+@ddt.ddt
 @mock.patch('cinder.volume.api.API.get', test_contrib.volume_get)
 class SnapshotManageTest(test.TestCase):
     """Test cases for cinder/api/v3/snapshot_manage.py"""
@@ -82,9 +89,10 @@ class SnapshotManageTest(test.TestCase):
         res = self._get_resp_post(body, version="3.7")
         self.assertEqual(404, res.status_int, res)

-    def _get_resp_get(self, host, detailed, paging, version="3.8"):
+    def _get_resp_get(self, host, detailed, paging, version="3.8", **kwargs):
         """Helper to execute a GET os-snapshot-manage API call."""
-        params = {'host': host}
+        params = {'host': host} if host else {}
+        params.update(kwargs)
         if paging:
             params.update({'marker': '1234', 'limit': 10,
                            'offset': 4, 'sort': 'reference:asc'})
@@ -132,3 +140,56 @@ class SnapshotManageTest(test.TestCase):
     def test_get_manageable_snapshots_detail_previous_version(self):
         res = self._get_resp_get('fakehost', True, True, version="3.7")
         self.assertEqual(404, res.status_int)
+
+    @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'),
+              (False, True, 'detail_list'), (False, False, 'summary_list'))
+    @ddt.unpack
+    @mock.patch('cinder.objects.Service.is_up', True)
+    @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt')
+    @mock.patch('cinder.objects.Service.get_by_id')
+    def test_get_manageable_detail(self, clustered, is_detail, view_method,
+                                   get_service_mock, get_cctxt_mock):
+        if clustered:
+            host = None
+            cluster_name = 'mycluster'
+            version = '3.17'
+            kwargs = {'cluster': cluster_name}
+        else:
+            host = 'fakehost'
+            cluster_name = None
+            version = '3.8'
+            kwargs = {}
+        service = objects.Service(disabled=False, host='fakehost',
+                                  cluster_name=cluster_name)
+        get_service_mock.return_value = service
+        snaps = [mock.sentinel.snap1, mock.sentinel.snap2]
+        get_cctxt_mock.return_value.call.return_value = snaps
+
+        view_data = {'manageable-snapshots': [{'vol': 'mock.sentinel.snap1'},
+                                              {'vol': 'mock.sentinel.snap2'}]}
+        view_path = ('cinder.api.views.manageable_snapshots.ViewBuilder.'
+                     + view_method)
+        with mock.patch(view_path, return_value=view_data) as detail_view_mock:
+            res = self._get_resp_get(host, is_detail, False, version=version,
+                                     **kwargs)
+
+        self.assertEqual(200, res.status_int)
+        get_cctxt_mock.assert_called_once_with(service.service_topic_queue)
+        get_cctxt_mock.return_value.call.assert_called_once_with(
+            mock.ANY, 'get_manageable_snapshots', marker=None,
+            limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'],
+            sort_dirs=['desc'])
+        detail_view_mock.assert_called_once_with(mock.ANY, snaps, len(snaps))
+        get_service_mock.assert_called_once_with(
+            mock.ANY, None, host=host, binary='cinder-volume',
+            cluster_name=cluster_name)
+
+    @ddt.data('3.8', '3.17')
+    def test_get_manageable_missing_host(self, version):
+        res = self._get_resp_get(None, True, False, version=version)
+        self.assertEqual(400, res.status_int)
+
+    def test_get_manageable_both_host_cluster(self):
+        res = self._get_resp_get('host', True, False, version='3.17',
+                                 cluster='cluster')
+        self.assertEqual(400, res.status_int)
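The new tests lean on ddt to run a single test body for both the host-based (3.8) and cluster-based (3.17) paths. A minimal self-contained example of the @ddt.data/@ddt.unpack pattern used here (requires the ddt package; the test names and data are illustrative):

    import unittest

    import ddt  # pip install ddt

    @ddt.ddt
    class ExampleTest(unittest.TestCase):
        # Each tuple is unpacked into the test's positional arguments.
        @ddt.data((True, 'mycluster', '3.17'), (False, None, '3.8'))
        @ddt.unpack
        def test_modes(self, clustered, cluster_name, version):
            if clustered:
                self.assertEqual('3.17', version)
            else:
                self.assertIsNone(cluster_name)

    if __name__ == '__main__':
        unittest.main()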
diff --git a/cinder/tests/unit/api/v3/test_volume_manage.py b/cinder/tests/unit/api/v3/test_volume_manage.py
index f1a6b1debc5..5f9a7c2535b 100644
--- a/cinder/tests/unit/api/v3/test_volume_manage.py
+++ b/cinder/tests/unit/api/v3/test_volume_manage.py
@@ -12,7 +12,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import ddt
 import mock
+from oslo_config import cfg
 from oslo_serialization import jsonutils
 try:
     from urllib import urlencode
@@ -22,12 +24,16 @@ import webob

 from cinder.api.v3 import router as router_v3
 from cinder import context
+from cinder import objects
 from cinder import test
 from cinder.tests.unit.api.contrib import test_volume_manage as test_contrib
 from cinder.tests.unit.api import fakes
 from cinder.tests.unit import fake_constants as fake


+CONF = cfg.CONF
+
+
 def app():
     # no auth, just let environ['cinder.context'] pass through
     api = router_v3.APIRouter()
@@ -36,6 +42,7 @@ def app():
     return mapper


+@ddt.ddt
 @mock.patch('cinder.objects.service.Service.get_by_host_and_topic',
             test_contrib.service_get)
 @mock.patch('cinder.volume.volume_types.get_volume_type_by_name',
@@ -83,9 +90,10 @@ class VolumeManageTest(test.TestCase):
         res = self._get_resp_post(body)
         self.assertEqual(400, res.status_int, res)

-    def _get_resp_get(self, host, detailed, paging, version="3.8"):
+    def _get_resp_get(self, host, detailed, paging, version="3.8", **kwargs):
         """Helper to execute a GET os-volume-manage API call."""
-        params = {'host': host}
+        params = {'host': host} if host else {}
+        params.update(kwargs)
         if paging:
             params.update({'marker': '1234', 'limit': 10,
                            'offset': 4, 'sort': 'reference:asc'})
@@ -134,3 +142,56 @@ class VolumeManageTest(test.TestCase):
     def test_get_manageable_volumes_detail_previous_version(self):
         res = self._get_resp_get('fakehost', True, False, version="3.7")
         self.assertEqual(404, res.status_int)
+
+    @ddt.data((True, True, 'detail_list'), (True, False, 'summary_list'),
+              (False, True, 'detail_list'), (False, False, 'summary_list'))
+    @ddt.unpack
+    @mock.patch('cinder.objects.Service.is_up', True)
+    @mock.patch('cinder.volume.rpcapi.VolumeAPI._get_cctxt')
+    @mock.patch('cinder.objects.Service.get_by_id')
+    def test_get_manageable_detail(self, clustered, is_detail, view_method,
+                                   get_service_mock, get_cctxt_mock):
+        if clustered:
+            host = None
+            cluster_name = 'mycluster'
+            version = '3.17'
+            kwargs = {'cluster': cluster_name}
+        else:
+            host = 'fakehost'
+            cluster_name = None
+            version = '3.8'
+            kwargs = {}
+        service = objects.Service(disabled=False, host='fakehost',
+                                  cluster_name=cluster_name)
+        get_service_mock.return_value = service
+        volumes = [mock.sentinel.volume1, mock.sentinel.volume2]
+        get_cctxt_mock.return_value.call.return_value = volumes
+
+        view_data = {'manageable-volumes': [{'vol': str(v)} for v in volumes]}
+        view_path = ('cinder.api.views.manageable_volumes.ViewBuilder.'
+                     + view_method)
+        with mock.patch(view_path, return_value=view_data) as detail_view_mock:
+            res = self._get_resp_get(host, is_detail, False, version=version,
+                                     **kwargs)
+
+        self.assertEqual(200, res.status_int)
+        get_cctxt_mock.assert_called_once_with(service.service_topic_queue)
+        get_cctxt_mock.return_value.call.assert_called_once_with(
+            mock.ANY, 'get_manageable_volumes', marker=None,
+            limit=CONF.osapi_max_limit, offset=0, sort_keys=['reference'],
+            sort_dirs=['desc'])
+        detail_view_mock.assert_called_once_with(mock.ANY, volumes,
+                                                 len(volumes))
+        get_service_mock.assert_called_once_with(
+            mock.ANY, None, host=host, binary='cinder-volume',
+            cluster_name=cluster_name)
+
+    @ddt.data('3.8', '3.17')
+    def test_get_manageable_missing_host(self, version):
+        res = self._get_resp_get(None, True, False, version=version)
+        self.assertEqual(400, res.status_int)
+
+    def test_get_manageable_both_host_cluster(self):
+        res = self._get_resp_get('host', True, False, version='3.17',
+                                 cluster='cluster')
+        self.assertEqual(400, res.status_int)
diff --git a/cinder/tests/unit/consistencygroup/test_cg.py b/cinder/tests/unit/consistencygroup/test_cg.py
index cdb0668cd73..61430a83b9d 100644
--- a/cinder/tests/unit/consistencygroup/test_cg.py
+++ b/cinder/tests/unit/consistencygroup/test_cg.py
@@ -664,7 +664,7 @@ class ConsistencyGroupTestCase(base.BaseVolumeTestCase):
         self.volume.host = 'host1@backend2'
         self.volume.create_volume(self.context, volume)

-        self.assertRaises(exception.InvalidVolume,
+        self.assertRaises(exception.Invalid,
                           self.volume.delete_consistencygroup,
                           self.context,
                           group)
diff --git a/cinder/tests/unit/group/test_groups_manager.py b/cinder/tests/unit/group/test_groups_manager.py
index 01b1826851e..cda9af67aa6 100644
--- a/cinder/tests/unit/group/test_groups_manager.py
+++ b/cinder/tests/unit/group/test_groups_manager.py
@@ -698,7 +698,7 @@ class GroupManagerTestCase(test.TestCase):
         self.volume.host = 'host1@backend2'
         self.volume.create_volume(self.context, volume)

-        self.assertRaises(exception.InvalidVolume,
+        self.assertRaises(exception.Invalid,
                           self.volume.delete_group,
                           self.context,
                           group)
diff --git a/cinder/tests/unit/test_volume.py b/cinder/tests/unit/test_volume.py
index 5016fa271ae..c05e60ecf47 100644
--- a/cinder/tests/unit/test_volume.py
+++ b/cinder/tests/unit/test_volume.py
@@ -260,6 +260,47 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.volume.delete_volume(self.context, vol3)
         self.volume.delete_volume(self.context, vol4)

+    @mock.patch('cinder.manager.CleanableManager.init_host')
+    def test_init_host_count_allocated_capacity_cluster(self, init_host_mock):
+        cluster_name = 'mycluster'
+        self.volume.cluster = cluster_name
+        # All these volumes belong to the same cluster, so we will calculate
+        # the capacity of them all because we query the DB by cluster_name.
+        tests_utils.create_volume(self.context, size=100, host=CONF.host,
+                                  cluster_name=cluster_name)
+        tests_utils.create_volume(
+            self.context, size=128, cluster_name=cluster_name,
+            host=volutils.append_host(CONF.host, 'pool0'))
+        tests_utils.create_volume(
+            self.context, size=256, cluster_name=cluster_name,
+            host=volutils.append_host(CONF.host + '2', 'pool0'))
+        tests_utils.create_volume(
+            self.context, size=512, cluster_name=cluster_name,
+            host=volutils.append_host(CONF.host + '2', 'pool1'))
+        tests_utils.create_volume(
+            self.context, size=1024, cluster_name=cluster_name,
+            host=volutils.append_host(CONF.host + '3', 'pool2'))
+
+        # These don't belong to the cluster, so they will be ignored
+        tests_utils.create_volume(
+            self.context, size=1024,
+            host=volutils.append_host(CONF.host, 'pool2'))
+        tests_utils.create_volume(
+            self.context, size=1024, cluster_name=cluster_name + '1',
+            host=volutils.append_host(CONF.host + '3', 'pool2'))
+
+        self.volume.init_host(service_id=self.service_id)
+        init_host_mock.assert_called_once_with(
+            service_id=self.service_id, added_to_cluster=None)
+        stats = self.volume.stats
+        self.assertEqual(2020, stats['allocated_capacity_gb'])
+        self.assertEqual(
+            384, stats['pools']['pool0']['allocated_capacity_gb'])
+        self.assertEqual(
+            512, stats['pools']['pool1']['allocated_capacity_gb'])
+        self.assertEqual(
+            1024, stats['pools']['pool2']['allocated_capacity_gb'])
+
     @mock.patch.object(driver.BaseVD, "update_provider_info")
     def test_init_host_sync_provider_info(self, mock_update):
         vol0 = tests_utils.create_volume(
@@ -328,6 +369,50 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         self.volume.delete_volume(self.context, vol0)
         self.volume.delete_volume(self.context, vol1)

+    @mock.patch.object(driver.BaseVD, "update_provider_info")
+    def test_init_host_sync_provider_info_no_update_cluster(self,
+                                                            mock_update):
+        cluster_name = 'mycluster'
+        self.volume.cluster = cluster_name
+        vol0 = tests_utils.create_volume(
+            self.context, size=1, host=CONF.host, cluster_name=cluster_name)
+        vol1 = tests_utils.create_volume(
+            self.context, size=1, host=CONF.host + '2',
+            cluster_name=cluster_name)
+        vol2 = tests_utils.create_volume(
+            self.context, size=1, host=CONF.host)
+        vol3 = tests_utils.create_volume(
+            self.context, size=1, host=CONF.host,
+            cluster_name=cluster_name + '2')
+        snap0 = tests_utils.create_snapshot(self.context, vol0.id)
+        snap1 = tests_utils.create_snapshot(self.context, vol1.id)
+        tests_utils.create_snapshot(self.context, vol2.id)
+        tests_utils.create_snapshot(self.context, vol3.id)
+        mock_update.return_value = ([], [])
+        # initialize
+        self.volume.init_host(service_id=self.service_id)
+        # Grab volume and snapshot objects
+        vol0_obj = objects.Volume.get_by_id(context.get_admin_context(),
+                                            vol0.id)
+        vol1_obj = objects.Volume.get_by_id(context.get_admin_context(),
+                                            vol1.id)
+        snap0_obj = objects.Snapshot.get_by_id(self.context, snap0.id)
+        snap1_obj = objects.Snapshot.get_by_id(self.context, snap1.id)
+
+        self.assertSetEqual({vol0.id, vol1.id},
+                            {vol.id for vol in mock_update.call_args[0][0]})
+        self.assertSetEqual({snap0.id, snap1.id},
+                            {snap.id for snap in mock_update.call_args[0][1]})
+        # Check provider ids are not changed
+        self.assertIsNone(vol0_obj.provider_id)
+        self.assertIsNone(vol1_obj.provider_id)
+        self.assertIsNone(snap0_obj.provider_id)
+        self.assertIsNone(snap1_obj.provider_id)
+        # Clean up
+        self.volume.delete_snapshot(self.context, snap0_obj)
+        self.volume.delete_snapshot(self.context, snap1_obj)
+        self.volume.delete_volume(self.context, vol0)
+        self.volume.delete_volume(self.context, vol1)
+
     @mock.patch('cinder.volume.manager.VolumeManager.'
                 '_include_resources_in_cluster')
     def test_init_host_cluster_not_changed(self, include_in_cluster_mock):
@@ -356,7 +441,7 @@ class VolumeTestCase(base.BaseVolumeTestCase):
         vol_get_all_mock.assert_called_once_with(
             mock.ANY, filters={'cluster_name': cluster})
         snap_get_all_mock.assert_called_once_with(
-            mock.ANY, search_opts={'cluster_name': cluster})
+            mock.ANY, filters={'cluster_name': cluster})

 @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version')
 @mock.patch('cinder.objects.service.Service.get_minimum_obj_version')
diff --git a/cinder/tests/unit/test_volume_rpcapi.py b/cinder/tests/unit/test_volume_rpcapi.py
index 5fc54a0c14c..ead962945b3 100644
--- a/cinder/tests/unit/test_volume_rpcapi.py
+++ b/cinder/tests/unit/test_volume_rpcapi.py
@@ -16,9 +16,9 @@
 Unit Tests for cinder.volume.rpcapi
 """
 import copy
+
 import ddt
 import mock
-
 from oslo_config import cfg
 from oslo_serialization import jsonutils

@@ -328,12 +328,17 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_volume_api('delete_consistencygroup', rpc_method='cast',
                               group=self.fake_src_cg, version='3.0')

-    def test_update_consistencygroup(self):
+    @ddt.data(None, 'my_cluster')
+    def test_update_consistencygroup(self, cluster_name):
+        self._change_cluster_name(self.fake_cg, cluster_name)
         self._test_volume_api('update_consistencygroup', rpc_method='cast',
                               group=self.fake_cg, add_volumes=['vol1'],
                               remove_volumes=['vol2'], version='3.0')

-    def test_create_cgsnapshot(self):
+    @ddt.data(None, 'my_cluster')
+    def test_create_cgsnapshot(self, cluster_name):
+        self._change_cluster_name(self.fake_cgsnap.consistencygroup,
+                                  cluster_name)
         self._test_volume_api('create_cgsnapshot', rpc_method='cast',
                               cgsnapshot=self.fake_cgsnap, version='3.0')

@@ -376,7 +381,9 @@ class VolumeRpcAPITestCase(test.TestCase):
                               cascade=True,
                               version='3.0')

-    def test_create_snapshot(self):
+    @ddt.data(None, 'mycluster')
+    def test_create_snapshot(self, cluster_name):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
         self._test_volume_api('create_snapshot',
                               rpc_method='cast',
                               volume=self.fake_volume_obj,
@@ -475,7 +482,9 @@ class VolumeRpcAPITestCase(test.TestCase):
                               attachment_id='fake_uuid',
                               version=version)

-    def test_copy_volume_to_image(self):
+    @ddt.data(None, 'mycluster')
+    def test_copy_volume_to_image(self, cluster_name):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
         self._test_volume_api('copy_volume_to_image',
                               rpc_method='cast',
                               volume=self.fake_volume_obj,
@@ -517,7 +526,9 @@ class VolumeRpcAPITestCase(test.TestCase):
                               force=False,
                               version='3.0')

-    def test_accept_transfer(self):
+    @ddt.data(None, 'mycluster')
+    def test_accept_transfer(self, cluster_name):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
         self._test_volume_api('accept_transfer',
                               rpc_method='call',
                               volume=self.fake_volume_obj,
@@ -653,28 +664,34 @@ class VolumeRpcAPITestCase(test.TestCase):
                               volume=self.fake_volume_obj,
                               version='3.0')

+    @ddt.data(None, 'mycluster')
     @mock.patch('oslo_messaging.RPCClient.can_send_version',
                 return_value=True)
-    def test_get_backup_device(self, mock_can_send_version):
+    def test_get_backup_device(self, cluster_name, mock_can_send_version):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
         self._test_volume_api('get_backup_device', rpc_method='call',
                               backup=self.fake_backup_obj,
                               volume=self.fake_volume_obj,
                               version='3.2')

+    @ddt.data(None, 'mycluster')
     @mock.patch('oslo_messaging.RPCClient.can_send_version',
                 return_value=False)
     @mock.patch('cinder.objects.backup.BackupDeviceInfo.from_primitive',
                 return_value={})
-    def test_get_backup_device_old(self, mock_from_primitive,
+    def test_get_backup_device_old(self, cluster_name, mock_from_primitive,
                                    mock_can_send_version):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
         self._test_volume_api('get_backup_device', rpc_method='call',
                               backup=self.fake_backup_obj,
                               volume=self.fake_volume_obj,
                               version='3.0')

-    def test_secure_file_operations_enabled(self):
+    @ddt.data(None, 'mycluster')
+    def test_secure_file_operations_enabled(self, cluster_name):
+        self._change_cluster_name(self.fake_volume_obj, cluster_name)
         self._test_volume_api('secure_file_operations_enabled',
                               rpc_method='call',
                               volume=self.fake_volume_obj,
@@ -693,7 +710,9 @@ class VolumeRpcAPITestCase(test.TestCase):
         self._test_group_api('delete_group', rpc_method='cast',
                              group=self.fake_group, version='3.0')

-    def test_update_group(self):
+    @ddt.data(None, 'mycluster')
+    def test_update_group(self, cluster_name):
+        self._change_cluster_name(self.fake_group, cluster_name)
         self._test_group_api('update_group', rpc_method='cast',
                              group=self.fake_group, add_volumes=['vol1'],
                              remove_volumes=['vol2'], version='3.0')
diff --git a/cinder/transfer/api.py b/cinder/transfer/api.py
index 6800fbc526c..6b02cdecac4 100644
--- a/cinder/transfer/api.py
+++ b/cinder/transfer/api.py
@@ -30,6 +30,7 @@ import six
 from cinder.db import base
 from cinder import exception
 from cinder.i18n import _, _LE, _LI
+from cinder import objects
 from cinder import quota
 from cinder import quota_utils
 from cinder.volume import api as volume_api
@@ -162,7 +163,7 @@ class API(base.Base):
             raise exception.InvalidAuthKey(reason=msg)

         volume_id = transfer['volume_id']
-        vol_ref = self.db.volume_get(context.elevated(), volume_id)
+        vol_ref = objects.Volume.get_by_id(context.elevated(), volume_id)
         if vol_ref['consistencygroup_id']:
             msg = _("Volume %s must not be part of a consistency "
                     "group.") % vol_ref['id']
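From here on, the API layer passes the Service object down so the RPC layer can route by `service_topic_queue` instead of a raw host string. A minimal sketch of the idea behind that property, as inferred from how this patch uses it (illustrative only; the real property lives on Cinder's versioned objects):

    class ResourceSketch(object):
        def __init__(self, host=None, cluster_name=None):
            self.host = host
            self.cluster_name = cluster_name

        @property
        def service_topic_queue(self):
            # Assumption based on this patch's usage: the cluster name
            # wins over the host when both are set.
            return self.cluster_name or self.host

    print(ResourceSketch(host='node1@lvm').service_topic_queue)
    # node1@lvm
    print(ResourceSketch(host='node1@lvm',
                         cluster_name='mycluster@lvm').service_topic_queue)
    # mycluster@lvm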
diff --git a/cinder/volume/api.py b/cinder/volume/api.py
index b57aa7dd4e2..a18f9ac55cc 100644
--- a/cinder/volume/api.py
+++ b/cinder/volume/api.py
@@ -1657,10 +1657,11 @@ class API(base.Base):
                  resource=vol_ref)
         return vol_ref

-    def get_manageable_volumes(self, context, host, marker=None, limit=None,
-                               offset=None, sort_keys=None, sort_dirs=None):
-        self._get_service_by_host_cluster(context, host, None)
-        return self.volume_rpcapi.get_manageable_volumes(context, host,
+    def get_manageable_volumes(self, context, host, cluster_name, marker=None,
+                               limit=None, offset=None, sort_keys=None,
+                               sort_dirs=None):
+        svc = self._get_service_by_host_cluster(context, host, cluster_name)
+        return self.volume_rpcapi.get_manageable_volumes(context, svc,
                                                          marker, limit,
                                                          offset, sort_keys,
                                                          sort_dirs)
@@ -1680,10 +1681,12 @@ class API(base.Base):
             context, snapshot_object, ref, service.service_topic_queue)
         return snapshot_object

-    def get_manageable_snapshots(self, context, host, marker=None, limit=None,
-                                 offset=None, sort_keys=None, sort_dirs=None):
-        self._get_service_by_host_cluster(context, host, None, 'snapshot')
-        return self.volume_rpcapi.get_manageable_snapshots(context, host,
+    def get_manageable_snapshots(self, context, host, cluster_name,
+                                 marker=None, limit=None, offset=None,
+                                 sort_keys=None, sort_dirs=None):
+        svc = self._get_service_by_host_cluster(context, host, cluster_name,
+                                                'snapshot')
+        return self.volume_rpcapi.get_manageable_snapshots(context, svc,
                                                            marker, limit,
                                                            offset, sort_keys,
                                                            sort_dirs)
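The manager changes below consolidate the per-backend DB queries into `_get_my_resources`: clustered services filter by `cluster_name`, standalone ones by `host`. A standalone sketch of that selection logic against a fake DB (names mirror the patch; the `get_all` callable is a stand-in):

    def get_my_resources(cluster, host, get_all):
        if cluster:
            filters = {'cluster_name': cluster}
        else:
            filters = {'host': host}
        return get_all(filters=filters)

    fake_db = [
        {'id': 1, 'host': 'node1@lvm', 'cluster_name': 'mycluster'},
        {'id': 2, 'host': 'node2@lvm', 'cluster_name': 'mycluster'},
        {'id': 3, 'host': 'node3@lvm', 'cluster_name': None},
    ]

    def get_all(filters):
        return [r for r in fake_db
                if all(r[k] == v for k, v in filters.items())]

    print(get_my_resources('mycluster', 'node1@lvm', get_all))  # ids 1 and 2
    print(get_my_resources(None, 'node3@lvm', get_all))         # id 3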
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index fc1f074daaa..1a114639a0f 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -418,12 +418,8 @@ class VolumeManager(manager.CleanableManager,
         # Initialize backend capabilities list
         self.driver.init_capabilities()

-        if self.cluster:
-            filters = {'cluster_name': self.cluster}
-        else:
-            filters = {'host': self.host}
-        volumes = objects.VolumeList.get_all(ctxt, filters=filters)
-        snapshots = objects.SnapshotList.get_all(ctxt, search_opts=filters)
+        volumes = self._get_my_volumes(ctxt)
+        snapshots = self._get_my_snapshots(ctxt)
         self._sync_provider_info(ctxt, volumes, snapshots)
         # FIXME volume count for exporting is wrong

@@ -651,9 +647,16 @@ class VolumeManager(manager.CleanableManager,
         LOG.info(_LI("Created volume successfully."), resource=volume)
         return volume.id

-    def _is_our_resource(self, resource):
-        resource_topic = vol_utils.extract_host(resource.service_topic_queue)
-        return resource_topic == self.service_topic_queue
+    def _check_is_our_resource(self, resource):
+        if resource.host:
+            res_backend = vol_utils.extract_host(resource.service_topic_queue)
+            backend = vol_utils.extract_host(self.service_topic_queue)
+            if res_backend != backend:
+                msg = (_('Invalid %(resource)s: %(resource)s %(id)s is not '
+                         'local to %(backend)s.') %
+                       {'resource': resource.obj_name(), 'id': resource.id,
+                        'backend': backend})
+                raise exception.Invalid(msg)

     @coordination.synchronized('{volume.id}-{f_name}')
     @objects.Volume.set_workers
@@ -687,9 +690,7 @@ class VolumeManager(manager.CleanableManager,
         if volume['attach_status'] == fields.VolumeAttachStatus.ATTACHED:
             # Volume is still attached, need to detach first
             raise exception.VolumeAttached(volume_id=volume.id)
-        if not self._is_our_resource(volume):
-            raise exception.InvalidVolume(
-                reason=_("volume is not local to this node"))
+        self._check_is_our_resource(volume)

         if unmanage_only and cascade:
             # This could be done, but is ruled out for now just
@@ -2413,6 +2414,19 @@ class VolumeManager(manager.CleanableManager,

         return vol_ref

+    def _get_my_resources(self, ctxt, ovo_class_list):
+        if self.cluster:
+            filters = {'cluster_name': self.cluster}
+        else:
+            filters = {'host': self.host}
+        return ovo_class_list.get_all(ctxt, filters=filters)
+
+    def _get_my_volumes(self, ctxt):
+        return self._get_my_resources(ctxt, objects.VolumeList)
+
+    def _get_my_snapshots(self, ctxt):
+        return self._get_my_resources(ctxt, objects.SnapshotList)
+
     def get_manageable_volumes(self, ctxt, marker, limit, offset, sort_keys,
                                sort_dirs):
         try:
@@ -2422,7 +2436,7 @@ class VolumeManager(manager.CleanableManager,
                 LOG.exception(_LE("Listing manageable volumes failed, due "
                                   "to uninitialized driver."))

-        cinder_volumes = objects.VolumeList.get_all_by_host(ctxt, self.host)
+        cinder_volumes = self._get_my_volumes(ctxt)
         try:
             driver_entries = self.driver.get_manageable_volumes(
                 cinder_volumes, marker, limit, offset, sort_keys, sort_dirs)
@@ -2951,13 +2965,7 @@ class VolumeManager(manager.CleanableManager,
                     fields.VolumeAttachStatus.ATTACHED):
                 # Volume is still attached, need to detach first
                 raise exception.VolumeAttached(volume_id=volume.id)
-            # self.host is 'host@backend'
-            # volume.host is 'host@backend#pool'
-            # Extract host before doing comparison
-            if volume.host:
-                if not self._is_our_resource(volume):
-                    raise exception.InvalidVolume(
-                        reason=_("Volume is not local to this node"))
+            self._check_is_our_resource(volume)

         self._notify_about_consistencygroup_usage(
             context, group, "delete.start")
@@ -3076,15 +3084,7 @@ class VolumeManager(manager.CleanableManager,
             if vol_obj.attach_status == "attached":
                 # Volume is still attached, need to detach first
                 raise exception.VolumeAttached(volume_id=vol_obj.id)
-            # self.host is 'host@backend'
-            # vol_obj.host is 'host@backend#pool'
-            # Extract host before doing comparison
-            if vol_obj.host:
-                if not self._is_our_resource(vol_obj):
-                    backend = vol_utils.extract_host(self.service_topic_queue)
-                    msg = (_("Volume %(vol_id)s is not local to %(backend)s") %
-                           {'vol_id': vol_obj.id, 'backend': backend})
-                    raise exception.InvalidVolume(reason=msg)
+            self._check_is_our_resource(vol_obj)

         self._notify_about_group_usage(
             context, group, "delete.start")
@@ -3240,7 +3240,7 @@ class VolumeManager(manager.CleanableManager,
             remove_volumes_list = remove_volumes.split(',')
         for add_vol in add_volumes_list:
             try:
-                add_vol_ref = self.db.volume_get(context, add_vol)
+                add_vol_ovo = objects.Volume.get_by_id(context, add_vol)
             except exception.VolumeNotFound:
                 LOG.error(_LE("Update consistency group "
                               "failed to add volume-%(volume_id)s: "
@@ -3249,23 +3249,17 @@ class VolumeManager(manager.CleanableManager,
                           resource={'type': 'consistency_group',
                                     'id': group.id})
                 raise
-            if add_vol_ref['status'] not in VALID_ADD_VOL_TO_CG_STATUS:
+            if add_vol_ovo.status not in VALID_ADD_VOL_TO_CG_STATUS:
                 msg = (_("Cannot add volume %(volume_id)s to consistency "
                          "group %(group_id)s because volume is in an invalid "
                          "state: %(status)s. Valid states are: %(valid)s.") %
-                       {'volume_id': add_vol_ref['id'],
+                       {'volume_id': add_vol_ovo.id,
                         'group_id': group.id,
-                        'status': add_vol_ref['status'],
+                        'status': add_vol_ovo.status,
                         'valid': VALID_ADD_VOL_TO_CG_STATUS})
                 raise exception.InvalidVolume(reason=msg)
-            # self.host is 'host@backend'
-            # volume_ref['host'] is 'host@backend#pool'
-            # Extract host before doing comparison
-            new_host = vol_utils.extract_host(add_vol_ref['host'])
-            if new_host != self.host:
-                raise exception.InvalidVolume(
-                    reason=_("Volume is not local to this node."))
-            add_volumes_ref.append(add_vol_ref)
+            self._check_is_our_resource(add_vol_ovo)
+            add_volumes_ref.append(add_vol_ovo)

         for remove_vol in remove_volumes_list:
             try:
@@ -3402,13 +3396,7 @@ class VolumeManager(manager.CleanableManager,
                          'status': add_vol_ref.status,
                          'valid': VALID_ADD_VOL_TO_GROUP_STATUS})
                 raise exception.InvalidVolume(reason=msg)
-            # self.host is 'host@backend'
-            # volume_ref['host'] is 'host@backend#pool'
-            # Extract host before doing comparison
-            new_host = vol_utils.extract_host(add_vol_ref.host)
-            if new_host != self.host:
-                raise exception.InvalidVolume(
-                    reason=_("Volume is not local to this node."))
+            self._check_is_our_resource(add_vol_ref)
             add_volumes_ref.append(add_vol_ref)

         for remove_vol in remove_volumes_list:
@@ -4238,7 +4226,7 @@ class VolumeManager(manager.CleanableManager,
                 LOG.exception(_LE("Listing manageable snapshots failed, due "
                                   "to uninitialized driver."))

-        cinder_snapshots = self.db.snapshot_get_by_host(ctxt, self.host)
+        cinder_snapshots = self._get_my_snapshots(ctxt)
         try:
             driver_entries = self.driver.get_manageable_snapshots(
                 cinder_snapshots, marker, limit, offset, sort_keys, sort_dirs)
diff --git a/cinder/volume/rpcapi.py b/cinder/volume/rpcapi.py
index 8decac677ed..4047c1c46d7 100644
--- a/cinder/volume/rpcapi.py
+++ b/cinder/volume/rpcapi.py
@@ -138,7 +138,7 @@ class VolumeAPI(rpc.RPCAPI):

     def update_consistencygroup(self, ctxt, group, add_volumes=None,
                                 remove_volumes=None):
-        cctxt = self._get_cctxt(group.host)
+        cctxt = self._get_cctxt(group.service_topic_queue)
         cctxt.cast(ctxt, 'update_consistencygroup',
                    group=group,
                    add_volumes=add_volumes,
@@ -153,7 +153,7 @@ class VolumeAPI(rpc.RPCAPI):
                    source_cg=source_cg)

     def create_cgsnapshot(self, ctxt, cgsnapshot):
-        cctxt = self._get_cctxt(cgsnapshot.consistencygroup.host)
+        cctxt = self._get_cctxt(cgsnapshot.service_topic_queue)
         cctxt.cast(ctxt, 'create_cgsnapshot', cgsnapshot=cgsnapshot)

     def delete_cgsnapshot(self, ctxt, cgsnapshot):
@@ -181,7 +181,7 @@ class VolumeAPI(rpc.RPCAPI):

     def create_snapshot(self, ctxt, volume, snapshot):
         snapshot.create_worker()
-        cctxt = self._get_cctxt(volume['host'])
+        cctxt = self._get_cctxt(volume.service_topic_queue)
         cctxt.cast(ctxt, 'create_snapshot', snapshot=snapshot)

     def delete_snapshot(self, ctxt, snapshot, unmanage_only=False):
@@ -212,7 +212,7 @@ class VolumeAPI(rpc.RPCAPI):
         return cctxt.call(ctxt, 'detach_volume', **msg_args)

     def copy_volume_to_image(self, ctxt, volume, image_meta):
-        cctxt = self._get_cctxt(volume['host'])
+        cctxt = self._get_cctxt(volume.service_topic_queue)
         cctxt.cast(ctxt, 'copy_volume_to_image', volume_id=volume['id'],
                    image_meta=image_meta)

@@ -235,7 +235,7 @@ class VolumeAPI(rpc.RPCAPI):
         cctxt.cast(ctxt, 'publish_service_capabilities')

     def accept_transfer(self, ctxt, volume, new_user, new_project):
-        cctxt = self._get_cctxt(volume['host'])
+        cctxt = self._get_cctxt(volume.service_topic_queue)
         return cctxt.call(ctxt, 'accept_transfer', volume_id=volume['id'],
                           new_user=new_user, new_project=new_project)

@@ -319,7 +319,7 @@ class VolumeAPI(rpc.RPCAPI):
         return cctxt.call(ctxt, 'get_capabilities', discover=discover)

     def get_backup_device(self, ctxt, backup, volume):
-        cctxt = self._get_cctxt(volume.host, ('3.2', '3.0'))
+        cctxt = self._get_cctxt(volume.service_topic_queue, ('3.2', '3.0'))
         if cctxt.can_send_version('3.2'):
             backup_obj = cctxt.call(ctxt, 'get_backup_device', backup=backup,
                                     want_objects=True)
@@ -330,20 +330,20 @@ class VolumeAPI(rpc.RPCAPI):
         return backup_obj

     def secure_file_operations_enabled(self, ctxt, volume):
-        cctxt = self._get_cctxt(volume.host)
+        cctxt = self._get_cctxt(volume.service_topic_queue)
         return cctxt.call(ctxt, 'secure_file_operations_enabled',
                           volume=volume)

-    def get_manageable_volumes(self, ctxt, host, marker, limit, offset,
+    def get_manageable_volumes(self, ctxt, service, marker, limit, offset,
                                sort_keys, sort_dirs):
-        cctxt = self._get_cctxt(host)
+        cctxt = self._get_cctxt(service.service_topic_queue)
         return cctxt.call(ctxt, 'get_manageable_volumes', marker=marker,
                           limit=limit, offset=offset, sort_keys=sort_keys,
                           sort_dirs=sort_dirs)

-    def get_manageable_snapshots(self, ctxt, host, marker, limit, offset,
+    def get_manageable_snapshots(self, ctxt, service, marker, limit, offset,
                                  sort_keys, sort_dirs):
-        cctxt = self._get_cctxt(host)
+        cctxt = self._get_cctxt(service.service_topic_queue)
         return cctxt.call(ctxt, 'get_manageable_snapshots', marker=marker,
                           limit=limit, offset=offset, sort_keys=sort_keys,
                           sort_dirs=sort_dirs)
@@ -357,7 +357,7 @@ class VolumeAPI(rpc.RPCAPI):
         cctxt.cast(ctxt, 'delete_group', group=group)

     def update_group(self, ctxt, group, add_volumes=None,
                      remove_volumes=None):
-        cctxt = self._get_cctxt(group.host)
+        cctxt = self._get_cctxt(group.service_topic_queue)
         cctxt.cast(ctxt, 'update_group', group=group, add_volumes=add_volumes,
                    remove_volumes=remove_volumes)

@@ -368,7 +368,7 @@ class VolumeAPI(rpc.RPCAPI):
                    group_snapshot=group_snapshot, source_group=source_group)

     def create_group_snapshot(self, ctxt, group_snapshot):
-        cctxt = self._get_cctxt(group_snapshot.group.host)
+        cctxt = self._get_cctxt(group_snapshot.service_topic_queue)
         cctxt.cast(ctxt, 'create_group_snapshot',
                    group_snapshot=group_snapshot)
diff --git a/tools/lintstack.py b/tools/lintstack.py
index c36b04936fe..6bf31b0bf3b 100755
--- a/tools/lintstack.py
+++ b/tools/lintstack.py
@@ -69,6 +69,10 @@ ignore_messages = [

     # NOTE(dulek): This one is related to objects.
     "No value passed for parameter 'id' in function call",
+
+    # NOTE(geguileo): v3 common manage class for volumes and snapshots
+    "Instance of 'ManageResource' has no 'volume_api' member",
+    "Instance of 'ManageResource' has no '_list_manageable_view' member",
 ]

 # Note(maoy): We ignore cinder.tests for now due to high false
@@ -174,6 +178,7 @@ class LintOutput(object):
                          'code': self.code,
                          'message': self.message})

+
 class ErrorKeys(object):

     @classmethod
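Taken together, the rpcapi changes above all make the same substitution: `_get_cctxt(resource.host)` becomes `_get_cctxt(resource.service_topic_queue)`. A toy illustration of the apparent net effect, using a fake queue map (illustrative only; it assumes the cluster name serves as a topic shared by all services in that cluster, per the property sketch earlier):

    from collections import defaultdict

    queues = defaultdict(list)

    def cast(topic_queue, method):
        queues[topic_queue].append(method)

    # service_topic_queue resolves to 'mycluster' for clustered volumes
    # and to the backend host for the standalone one.
    cast('mycluster', 'create_snapshot')   # volume on node1@lvm, in mycluster
    cast('mycluster', 'create_snapshot')   # volume on node2@lvm, in mycluster
    cast('node3@lvm', 'create_snapshot')   # volume on node3@lvm, no cluster

    print(dict(queues))
    # {'mycluster': ['create_snapshot', 'create_snapshot'],
    #  'node3@lvm': ['create_snapshot']}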