Address comments when moving volume detach to block_device.py

This addresses some of the nit comments from change:

I7a53e08f3fad6abb27a1d8ad425b4f916341cab3

The TODOs in test_compute_mgr are clarified to explain that,
rather than moving those tests entirely, which we don't want
to do since we still want test coverage of the compute manager
code, what we really want is test coverage for the detach and
driver_detach methods that now live in DriverVolumeBlockDevice.

Change-Id: I07a05a52a832f9e2687369666c6140c076412856
Author: Matt Riedemann, 2017-03-31 11:48:57 -04:00
Committed by: Lee Yarwood
Parent: 88ac5dbf08
Commit: 8c8b2180e3
2 changed files with 18 additions and 11 deletions
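
The tests below keep exercising the compute manager paths; the coverage the
updated TODOs ask for would instead target DriverVolumeBlockDevice directly
in ../virt/test_block_device.py. As a rough illustration only, such a test
might look like the following sketch. Everything in it is hypothetical:
_FakeDriverVolumeBDM merely distills the host check visible in the
block_device.py hunk further down, and the detach() signature is assumed
from how the hunks use destroy_bdm and driver_detach.

import unittest
from unittest import mock


class _FakeDriverVolumeBDM(object):
    """Hypothetical stand-in distilling the detach() host check below."""

    def __init__(self, conf_host):
        self.conf_host = conf_host        # plays the role of CONF.host
        self.driver_detach = mock.Mock()  # real method in nova; mocked here

    def detach(self, context, instance, volume_api, virt_driver,
               destroy_bdm=True):
        # Mirrors the branch shown in the block_device.py hunk below.
        if self.conf_host == instance.host:
            self.driver_detach(context, instance, volume_api, virt_driver)


class TestDriverVolumeBlockDeviceDetach(unittest.TestCase):

    def test_detach_calls_driver_detach_on_local_host(self):
        # When the instance is on this host, detach() should delegate the
        # hypervisor-level work to driver_detach().
        bdm = _FakeDriverVolumeBDM(conf_host='compute-1')
        instance = mock.Mock(host='compute-1')
        volume_api, virt_driver = mock.Mock(), mock.Mock()
        bdm.detach('ctxt', instance, volume_api, virt_driver)
        bdm.driver_detach.assert_called_once_with(
            'ctxt', instance, volume_api, virt_driver)


if __name__ == '__main__':
    unittest.main()

A real version would of course build the BDM through nova's test helpers
rather than a stand-in class.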

nova/tests/unit/compute/test_compute_mgr.py

@@ -2440,11 +2440,13 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
             self.context, uuids.volume_id, connector)
 
     def test_detach_volume(self):
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.detach in
+        # ../virt/test_block_device.py
         self._test_detach_volume()
 
     def test_detach_volume_not_destroy_bdm(self):
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.detach in
+        # ../virt/test_block_device.py
         self._test_detach_volume(destroy_bdm=False)
 
     @mock.patch('nova.objects.BlockDeviceMapping.get_by_volume_and_instance')
@@ -2454,7 +2456,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     @mock.patch('nova.compute.utils.notify_about_volume_attach_detach')
     def _test_detach_volume(self, mock_notify_attach_detach, notify_inst_usage,
                             detach, bdm_get, destroy_bdm=True):
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.detach in
+        # ../virt/test_block_device.py
         volume_id = uuids.volume
         inst_obj = mock.Mock()
         inst_obj.uuid = uuids.instance
@@ -2500,7 +2503,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
     def test_detach_volume_evacuate(self):
         """For evacuate, terminate_connection is called with original host."""
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.driver_detach in
+        # ../virt/test_block_device.py
         expected_connector = {'host': 'evacuated-host'}
         conn_info_str = '{"connector": {"host": "evacuated-host"}}'
         self._test_detach_volume_evacuate(conn_info_str,
@@ -2515,7 +2519,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         case because nova does not have the info to get the connector for the
         original (evacuated) host.
         """
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.driver_detach in
+        # ../virt/test_block_device.py
         conn_info_str = '{"foo": "bar"}'  # Has no 'connector'.
         self._test_detach_volume_evacuate(conn_info_str)
@@ -2525,7 +2530,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         For evacuate, if the stashed connector also has the wrong host,
         then log it and stay with the local connector.
         """
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.driver_detach in
+        # ../virt/test_block_device.py
         conn_info_str = '{"connector": {"host": "other-host"}}'
         self._test_detach_volume_evacuate(conn_info_str)
@@ -2544,7 +2550,8 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
                        terminate call (optional). Default is to expect the
                        local connector to be used.
         """
-        # TODO(lyarwood): Move into ../virt/test_block_device.py
+        # TODO(lyarwood): Test DriverVolumeBlockDevice.driver_detach in
+        # ../virt/test_block_device.py
         volume_id = 'vol_id'
         instance = fake_instance.fake_instance_obj(self.context,
                                                    host='evacuated-host')

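The three evacuate tests above pivot on how the detach path picks the
connector passed to terminate_connection: prefer a connector stashed in
connection_info when it names the evacuated host, otherwise fall back to
the local connector. Below is a minimal standalone model of that selection,
inferred only from the docstrings above; choose_connector is an
illustrative name, not a nova helper.

import json


def choose_connector(conn_info_str, local_connector, instance_host):
    """Hypothetical model of the connector choice the evacuate tests cover."""
    stashed = json.loads(conn_info_str).get('connector')
    if stashed is None:
        # No stashed connector: nova cannot recover the evacuated host's
        # connector, so it falls back to the local one.
        return local_connector
    if stashed.get('host') != instance_host:
        # Stashed connector names the wrong host: log it, stay local.
        return local_connector
    return stashed


local = {'host': 'local-host'}
# Mirrors the three conn_info_str values used in the tests above.
assert choose_connector('{"connector": {"host": "evacuated-host"}}',
                        local, 'evacuated-host') == {'host': 'evacuated-host'}
assert choose_connector('{"foo": "bar"}', local, 'evacuated-host') == local
assert choose_connector('{"connector": {"host": "other-host"}}',
                        local, 'evacuated-host') == local
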
nova/virt/block_device.py

@@ -267,12 +267,12 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         volume_id = self.volume_id
 
         LOG.info(_LI('Attempting to driver detach volume %(volume_id)s from '
-                     ' mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp},
-                     instance=instance)
+                     'mountpoint %(mp)s'), {'volume_id': volume_id, 'mp': mp},
+                 instance=instance)
         try:
             if not virt_driver.instance_exists(instance):
                 LOG.warning(_LW('Detaching volume from unknown instance'),
-                    instance=instance)
+                            instance=instance)
 
             encryption = encryptors.get_encryption_metadata(context,
                 volume_api, volume_id, connection_info)
@@ -304,7 +304,7 @@ class DriverVolumeBlockDevice(DriverBlockDevice):
         if CONF.host == instance.host:
             self.driver_detach(context, instance, volume_api, virt_driver)
         elif not destroy_bdm:
-            LOG.debug("Skipping _driver_detach during remote rebuild.",
+            LOG.debug("Skipping driver_detach during remote rebuild.",
                       instance=instance)
         elif destroy_bdm:
             LOG.error(_LE("Unable to call for a driver detach of volume "
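
The remaining two branches of this elif chain, skipping the driver detach
during a remote rebuild and logging an error when the BDM is destroyed while
the volume sits on another host, can be modeled the same way as the sketch
near the top. Another standalone sketch with illustrative names, not nova
code; the truncated error string is kept as an ellipsis.

from unittest import mock


def detach_host_check(conf_host, instance, driver_detach, log,
                      destroy_bdm=True):
    # Distilled from the elif chain in the hunk above; names are illustrative.
    if conf_host == instance.host:
        driver_detach()
    elif not destroy_bdm:
        log.debug("Skipping driver_detach during remote rebuild.")
    elif destroy_bdm:
        log.error("Unable to call for a driver detach of volume ...")


driver_detach, log = mock.Mock(), mock.Mock()
instance = mock.Mock(host='evacuated-host')

# Remote rebuild: BDM kept, volume attached elsewhere -> skip and debug-log.
detach_host_check('compute-1', instance, driver_detach, log,
                  destroy_bdm=False)
log.debug.assert_called_once()

# Destroying the BDM from the wrong host -> nothing to detach, error-log it.
detach_host_check('compute-1', instance, driver_detach, log,
                  destroy_bdm=True)
log.error.assert_called_once()
driver_detach.assert_not_called()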