From 043ada94eb35c6210367857714840188febf75a4 Mon Sep 17 00:00:00 2001
From: Gorka Eguileor
Date: Fri, 26 Oct 2018 14:06:58 +0200
Subject: [PATCH] Fix non-iSCSI attach serialization

When the shared_targets concept was introduced, it defaulted to True for
any driver that did not report that capability.

This meant that the attachment of all volumes was serialized, even for
backends that have no concept of a "shared target".

This happened because the feature was intended for iSCSI connections but
was applied to all connection types, introducing an unnecessary
bottleneck for drivers such as RBD or FC.

This patch makes sure that the storage_protocol is also checked to be
iSCSI before serializing attachments.

Closes-Bug: #1800136
Change-Id: I2805e8acf560cb941ddd3454477d89fe5a13d37f
---
 cinder/cmd/manage.py          |  4 +-
 cinder/tests/unit/test_cmd.py | 82 +++++++++++++++--------------------
 cinder/volume/manager.py      | 17 ++++----
 3 files changed, 46 insertions(+), 57 deletions(-)

diff --git a/cinder/cmd/manage.py b/cinder/cmd/manage.py
index f401b1ab77c..3772f1f429b 100644
--- a/cinder/cmd/manage.py
+++ b/cinder/cmd/manage.py
@@ -107,7 +107,9 @@ def _get_non_shared_target_hosts(ctxt):
                                                      constants.VOLUME_TOPIC)
     for service in services:
         capabilities = rpcapi.get_capabilities(ctxt, service.host, True)
-        if not capabilities.get('shared_targets', True):
+        # Select only non iSCSI connections and iSCSI that are explicit
+        if (capabilities.get('storage_protocol') != 'iSCSI' or
+                not capabilities.get('shared_targets', True)):
             hosts.append(service.host)
             numvols_needing_update += db_api.model_query(
                 ctxt, models.Volume).filter_by(
diff --git a/cinder/tests/unit/test_cmd.py b/cinder/tests/unit/test_cmd.py
index 5a848488e36..f9c4ae654dc 100644
--- a/cinder/tests/unit/test_cmd.py
+++ b/cinder/tests/unit/test_cmd.py
@@ -2190,80 +2190,66 @@ class TestVolumeSharedTargetsOnlineMigration(test.TestCase):
         self.patch('cinder.objects.Service.get_minimum_rpc_version',
                    side_effect=_get_minimum_rpc_version_mock)
 
-    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
-                return_value='1.8')
-    def test_shared_targets_migrations(self, mock_version):
-        """Ensure we can update the column."""
         ctxt = context.get_admin_context()
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
+        # default value in db for shared_targets on a volume
+        # is True, so don't need to set it here explicitly
+        for i in range(3):
+            sqlalchemy_api.volume_create(
+                ctxt,
+                {'host': 'host1@lvm-driver1#lvm-driver1',
+                 'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
 
-        # Create another one correct setting
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'shared_targets': False,
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        # Need a service to query
         values = {
             'host': 'host1@lvm-driver1',
             'binary': constants.VOLUME_BINARY,
             'topic': constants.VOLUME_TOPIC,
             'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
         utils.create_service(ctxt, values)
+        self.ctxt = ctxt
 
+    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
+                return_value='1.8')
+    def test_shared_targets_migrations(self, mock_version):
+        """Ensure we can update the column."""
         # Run the migration and verify that we updated 1 entry
         with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
-                        return_value={'shared_targets': False}):
+                        return_value={'connection_protocol': 'iSCSI',
+                                      'shared_targets': False}):
             total, updated = (
                 cinder_manage.shared_targets_online_data_migration(
-                    ctxt, 10))
-            self.assertEqual(1, total)
-            self.assertEqual(1, updated)
+                    self.ctxt, 10))
+            self.assertEqual(3, total)
+            self.assertEqual(3, updated)
+
+    @mock.patch('cinder.objects.Service.get_minimum_obj_version',
+                return_value='1.8')
+    def test_shared_targets_migrations_non_iscsi(self, mock_version):
+        """Ensure we can update the column."""
+        # Run the migration and verify that we updated 1 entry
+        with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
+                        return_value={'connection_protocol': 'RBD'}):
+            total, updated = (
+                cinder_manage.shared_targets_online_data_migration(
+                    self.ctxt, 10))
+            self.assertEqual(3, total)
+            self.assertEqual(3, updated)
 
     @mock.patch('cinder.objects.Service.get_minimum_obj_version',
                 return_value='1.8')
     def test_shared_targets_migrations_with_limit(self, mock_version):
         """Ensure we update in batches."""
-        ctxt = context.get_admin_context()
-        # default value in db for shared_targets on a volume
-        # is True, so don't need to set it here explicitly
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        sqlalchemy_api.volume_create(
-            ctxt,
-            {'host': 'host1@lvm-driver1#lvm-driver1',
-             'service_uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'})
-
-        values = {
-            'host': 'host1@lvm-driver1',
-            'binary': constants.VOLUME_BINARY,
-            'topic': constants.VOLUME_TOPIC,
-            'uuid': 'f080f895-cff2-4eb3-9c61-050c060b59ad'}
-        utils.create_service(ctxt, values)
-
         # Run the migration and verify that we updated 1 entry
         with mock.patch('cinder.volume.rpcapi.VolumeAPI.get_capabilities',
-                        return_value={'shared_targets': False}):
+                        return_value={'connection_protocol': 'iSCSI',
+                                      'shared_targets': False}):
             total, updated = (
                 cinder_manage.shared_targets_online_data_migration(
-                    ctxt, 2))
+                    self.ctxt, 2))
             self.assertEqual(3, total)
             self.assertEqual(2, updated)
 
             total, updated = (
                 cinder_manage.shared_targets_online_data_migration(
-                    ctxt, 2))
+                    self.ctxt, 2))
             self.assertEqual(1, total)
             self.assertEqual(1, updated)
diff --git a/cinder/volume/manager.py b/cinder/volume/manager.py
index 1e078ea5dc8..bfe7c4c8cdb 100644
--- a/cinder/volume/manager.py
+++ b/cinder/volume/manager.py
@@ -689,14 +689,15 @@ class VolumeManager(manager.CleanableManager,
                 self._update_allocated_capacity(volume, decrement=True,
                                                 host=original_host)
 
-        shared_targets = (
-            1
-            if self.driver.capabilities.get('shared_targets', True)
-            else 0)
-        updates = {'service_uuid': self.service_uuid,
-                   'shared_targets': shared_targets}
-
-        volume.update(updates)
+        # Shared targets is only relevant for iSCSI connections.
+        # We default to True to be on the safe side.
+        volume.shared_targets = (
+            self.driver.capabilities.get('storage_protocol') == 'iSCSI' and
+            self.driver.capabilities.get('shared_targets', True))
+        # TODO(geguileo): service_uuid won't be enough on Active/Active
+        # deployments. There can be 2 services handling volumes from the same
+        # backend.
+        volume.service_uuid = self.service_uuid
         volume.save()
         LOG.info("Created volume successfully.",
                  resource=volume)
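
Note: the snippet below is not part of the patch above. It is a minimal,
standalone sketch of the check the patch introduces, for readers who want
to see the resulting behaviour in isolation. The helper name
needs_serialized_attach and the sample capability dicts are illustrative
only and are not Cinder APIs.

# Mirrors the expression the patch adds in cinder/volume/manager.py:
# a volume only ends up with shared_targets=True when the backend speaks
# iSCSI and does not explicitly report shared_targets=False, so only
# those attachments get serialized.

def needs_serialized_attach(capabilities):
    """Return True when attach/detach of a volume should be serialized."""
    return (capabilities.get('storage_protocol') == 'iSCSI' and
            # A missing 'shared_targets' capability defaults to True,
            # staying on the safe side for iSCSI backends.
            capabilities.get('shared_targets', True))


if __name__ == '__main__':
    # Illustrative capability dicts, not values reported by real drivers.
    samples = {
        'rbd': {'storage_protocol': 'ceph'},
        'fc': {'storage_protocol': 'FC', 'shared_targets': True},
        'iscsi_default': {'storage_protocol': 'iSCSI'},
        'iscsi_no_shared': {'storage_protocol': 'iSCSI',
                            'shared_targets': False},
    }
    for name, caps in samples.items():
        print(name, needs_serialized_attach(caps))
    # Expected: only iscsi_default prints True; RBD and FC are no longer
    # serialized, and an iSCSI backend that explicitly reports
    # shared_targets=False is not serialized either.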