diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 6115af6f205d..82bf0b1f4df1 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -4660,7 +4660,7 @@ class ComputeManager(manager.Manager):
         self._notify_about_instance_usage(
             context, instance, "volume.attach", extra_usage_info=info)
 
-    def _driver_detach_volume(self, context, instance, bdm):
+    def _driver_detach_volume(self, context, instance, bdm, connection_info):
         """Do the actual driver detach using block device mapping."""
         mp = bdm.device_name
         volume_id = bdm.volume_id
@@ -4669,11 +4669,6 @@ class ComputeManager(manager.Manager):
                  {'volume_id': volume_id, 'mp': mp},
                  context=context, instance=instance)
 
-        connection_info = jsonutils.loads(bdm.connection_info)
-        # NOTE(vish): We currently don't use the serial when disconnecting,
-        # but added for completeness in case we ever do.
-        if connection_info and 'serial' not in connection_info:
-            connection_info['serial'] = volume_id
         try:
             if not self.driver.instance_exists(instance):
                 LOG.warning(_LW('Detaching volume from unknown instance'),
@@ -4699,8 +4694,6 @@ class ComputeManager(manager.Manager):
                               context=context, instance=instance)
                 self.volume_api.roll_detaching(context, volume_id)
 
-        return connection_info
-
     def _detach_volume(self, context, volume_id, instance, destroy_bdm=True,
                        attachment_id=None):
         """Detach a volume from an instance.
@@ -4745,8 +4738,21 @@ class ComputeManager(manager.Manager):
             self.notifier.info(context, 'volume.usage',
                                compute_utils.usage_volume_info(vol_usage))
 
-        connection_info = self._driver_detach_volume(context, instance, bdm)
+        connection_info = jsonutils.loads(bdm.connection_info)
         connector = self.driver.get_volume_connector(instance)
+        if CONF.host == instance.host:
+            # Only attempt to detach and disconnect from the volume if the
+            # instance is currently associated with the local compute host.
+            self._driver_detach_volume(context, instance, bdm, connection_info)
+        elif not destroy_bdm:
+            LOG.debug("Skipping _driver_detach_volume during remote rebuild.",
+                      instance=instance)
+        elif destroy_bdm:
+            LOG.error(_LE("Unable to call for a driver detach of volume "
+                          "%(vol_id)s due to the instance being registered to "
+                          "the remote host %(inst_host)s."),
+                      {'vol_id': volume_id, 'inst_host': instance.host},
+                      instance=instance)
 
         if connection_info and not destroy_bdm and (
            connector.get('host') != instance.host):
@@ -4934,7 +4940,8 @@ class ComputeManager(manager.Manager):
         try:
             bdm = objects.BlockDeviceMapping.get_by_volume_and_instance(
                     context, volume_id, instance.uuid)
-            self._driver_detach_volume(context, instance, bdm)
+            connection_info = jsonutils.loads(bdm.connection_info)
+            self._driver_detach_volume(context, instance, bdm, connection_info)
             connector = self.driver.get_volume_connector(instance)
             self.volume_api.terminate_connection(context, volume_id, connector)
         except exception.NotFound:
diff --git a/nova/tests/unit/compute/test_compute.py b/nova/tests/unit/compute/test_compute.py
index 4ba3f652525b..2438e671818c 100644
--- a/nova/tests/unit/compute/test_compute.py
+++ b/nova/tests/unit/compute/test_compute.py
@@ -358,7 +358,8 @@ class ComputeVolumeTestCase(BaseTestCase):
         }
         self.fake_volume = fake_block_device.FakeDbBlockDeviceDict(
                 {'source_type': 'volume', 'destination_type': 'volume',
-                 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb'})
+                 'volume_id': uuids.volume_id, 'device_name': '/dev/vdb',
+                 'connection_info': jsonutils.dumps({})})
         self.instance_object = objects.Instance._from_db_object(
                 self.context, objects.Instance(),
                 fake_instance.fake_db_instance())
@@ -433,7 +434,7 @@ class ComputeVolumeTestCase(BaseTestCase):
             self.context, 'fake', instance, 'fake_id')
         mock_internal_detach.assert_called_once_with(self.context,
                                                      instance,
-                                                     fake_bdm)
+                                                     fake_bdm, {})
         self.assertTrue(mock_destroy.called)
 
     def test_await_block_device_created_too_slow(self):
@@ -11230,9 +11231,11 @@ class EvacuateHostTestCase(BaseTestCase):
     @mock.patch.object(cinder.API, 'detach')
     @mock.patch.object(compute_manager.ComputeManager, '_prep_block_device')
     @mock.patch.object(compute_manager.ComputeManager, '_driver_detach_volume')
-    def test_rebuild_on_host_with_volumes(self, mock_drv_detach, mock_prep,
-                                          mock_detach):
-        """Confirm evacuate scenario reconnects volumes."""
+    def test_rebuild_on_remote_host_with_volumes(self, mock_drv_detach,
+                                                 mock_prep, mock_detach):
+        """Confirm that the evacuate scenario does not attempt a driver detach
+        when rebuilding an instance with volumes on a remote host
+        """
         values = {'instance_uuid': self.inst.uuid,
                   'source_type': 'volume',
                   'device_name': '/dev/vdc',
@@ -11269,10 +11272,7 @@ class EvacuateHostTestCase(BaseTestCase):
         for bdm in bdms:
            db.block_device_mapping_destroy(self.context, bdm['id'])
 
-        mock_drv_detach.assert_called_once_with(
-            test.MatchType(context.RequestContext),
-            test.MatchType(objects.Instance),
-            test.MatchType(objects.BlockDeviceMapping))
+        self.assertFalse(mock_drv_detach.called)
         # make sure volumes attach, detach are called
         mock_detach.assert_called_once_with(
             test.MatchType(context.RequestContext),
diff --git a/nova/tests/unit/compute/test_compute_mgr.py b/nova/tests/unit/compute/test_compute_mgr.py
index dce24554d5c4..c1feaf118a44 100755
--- a/nova/tests/unit/compute/test_compute_mgr.py
+++ b/nova/tests/unit/compute/test_compute_mgr.py
@@ -2319,6 +2319,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     @mock.patch('nova.objects.Instance._from_db_object')
     def test_remove_volume_connection(self, inst_from_db, detach, bdm_get):
         bdm = mock.sentinel.bdm
+        bdm.connection_info = jsonutils.dumps({})
         inst_obj = mock.Mock()
         inst_obj.uuid = 'uuid'
         bdm_get.return_value = bdm
@@ -2326,7 +2327,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         with mock.patch.object(self.compute, 'volume_api'):
             self.compute.remove_volume_connection(self.context, 'vol',
                                                   inst_obj)
-            detach.assert_called_once_with(self.context, inst_obj, bdm)
+            detach.assert_called_once_with(self.context, inst_obj, bdm, {})
             bdm_get.assert_called_once_with(self.context, 'vol', 'uuid')
 
     def test_detach_volume(self):
@@ -2344,10 +2345,12 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         volume_id = uuids.volume
         inst_obj = mock.Mock()
         inst_obj.uuid = uuids.instance
+        inst_obj.host = CONF.host
         attachment_id = uuids.attachment
 
         bdm = mock.MagicMock(spec=objects.BlockDeviceMapping)
         bdm.device_name = 'vdb'
+        bdm.connection_info = jsonutils.dumps({})
         bdm_get.return_value = bdm
 
         detach.return_value = {}
@@ -2362,7 +2365,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 destroy_bdm=destroy_bdm,
                 attachment_id=attachment_id)
 
-            detach.assert_called_once_with(self.context, inst_obj, bdm)
+            detach.assert_called_once_with(self.context, inst_obj, bdm, {})
             driver.get_volume_connector.assert_called_once_with(inst_obj)
             volume_api.terminate_connection.assert_called_once_with(
                 self.context, volume_id, connector_sentinel)
@@ -2438,6 +2441,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 instance,
                 destroy_bdm=False)
 
+            driver._driver_detach_volume.assert_not_called()
             driver.get_volume_connector.assert_called_once_with(instance)
             volume_api.terminate_connection.assert_called_once_with(
                 self.context, volume_id, expected_connector)
@@ -2450,22 +2454,6 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                 extra_usage_info={'volume_id': volume_id}
             )
 
-    def test__driver_detach_volume_return(self):
-        """_driver_detach_volume returns the connection_info from loads()."""
-        with mock.patch.object(jsonutils, 'loads') as loads:
-            conn_info_str = 'test-expected-loads-param'
-            bdm = mock.Mock()
-            bdm.connection_info = conn_info_str
-            loads.return_value = {'test-loads-key': 'test loads return value'}
-            instance = fake_instance.fake_instance_obj(self.context)
-
-            ret = self.compute._driver_detach_volume(self.context,
-                                                     instance,
-                                                     bdm)
-
-            self.assertEqual(loads.return_value, ret)
-            loads.assert_called_once_with(conn_info_str)
-
     def _test_rescue(self, clean_shutdown=True):
         instance = fake_instance.fake_instance_obj(
             self.context, vm_state=vm_states.ACTIVE)
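
For reference, the three-way branch this patch adds to _detach_volume can be
sketched as a standalone helper. This is a simplified, hypothetical
illustration only: the names detach_decision, local_host, and instance_host
are not part of the patch, and the real method calls
self._driver_detach_volume with oslo.log translated markers (_LE) rather than
plain logging.

    import logging

    LOG = logging.getLogger(__name__)

    def detach_decision(local_host, instance_host, destroy_bdm):
        """Mirror the host guard added to _detach_volume in the patch above.

        Returns one of:
          'detach' -- the instance is registered to this compute host, so
                      the driver detach is attempted;
          'skip'   -- remote rebuild (evacuate): the BDM is kept, so the
                      driver detach is skipped with a debug message;
          'error'  -- the BDM is being destroyed but the instance lives on
                      a remote host, so the detach cannot be performed.
        """
        if local_host == instance_host:
            return 'detach'
        elif not destroy_bdm:
            LOG.debug("Skipping driver detach during remote rebuild.")
            return 'skip'
        else:
            LOG.error("Unable to driver-detach volume: instance is "
                      "registered to the remote host %s.", instance_host)
            return 'error'

Under this sketch, detach_decision('hostA', 'hostA', True) returns 'detach',
while the evacuate scenario exercised by
test_rebuild_on_remote_host_with_volumes corresponds to
detach_decision('hostA', 'hostB', False) returning 'skip'.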