Remove check_attach
This patch finishes removing the 'check_attach' call from Nova. As
Cinder already performs the required checks as part of the
'reserve_volume' (os-reserve) call, it is unnecessary to check the
volume state machine in Nova as well, and doing so can lead to race
conditions.

The missing 'reserve_volume' call is added to the boot-from-volume
(BFV) flow. In case of a build failure, the volume stays locked in
'attaching' state until the instance in ERROR state is cleaned up.

We also now check the availability zone for each volume attach
operation, which was previously not done for unshelve. A release note
is added describing the 'cross_az_attach' option for users who do not
care about AZs.

The compute service version had to be bumped because old computes
still perform 'check_attach', which fails once the API reserves the
volume and the volume state moves to 'attaching'. If the computes are
not new enough, the old check is performed instead of 'reserve_volume'.

Closes-Bug: #1581230
Change-Id: I3a3caa4c566ecc132aa2699f8c7e5987bbcc863a
parent 8b498ce199
commit 63805735c2
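
In short, the new behavior is gated on the oldest running nova-compute
service. Below is a condensed paraphrase of the '_validate_bdm' change in
the diff that follows (error handling omitted; the free-standing helper
and its name are illustrative, not part of the patch):

    from nova import objects

    BFV_RESERVE_MIN_COMPUTE_VERSION = 17

    def reserve_volume_or_legacy_check(compute_api, context, volume_id,
                                       instance):
        # If every nova-compute is already at service version 17 or newer,
        # let Cinder validate the volume state and move it to 'attaching'
        # via os-reserve. Otherwise at least one old compute still runs
        # 'check_attach', which fails on an 'attaching' volume, so fall
        # back to the legacy state check this patch keeps in the API.
        min_compute_version = objects.Service.get_minimum_version(
            context, 'nova-compute')
        if min_compute_version >= BFV_RESERVE_MIN_COMPUTE_VERSION:
            return compute_api._check_attach_and_reserve_volume(
                context, volume_id, instance)
        return compute_api._check_attach(context, volume_id, instance)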
@@ -13,7 +13,7 @@
     "disabled_reason": null,
     "report_count": 1,
     "forced_down": false,
-    "version": 16
+    "version": 17
   }
 },
 "event_type": "service.update",
@@ -105,6 +105,7 @@ AGGREGATE_ACTION_UPDATE = 'Update'
 AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
 AGGREGATE_ACTION_DELETE = 'Delete'
 AGGREGATE_ACTION_ADD = 'Add'
+BFV_RESERVE_MIN_COMPUTE_VERSION = 17

 # FIXME(danms): Keep a global cache of the cells we find the
 # first time we look. This needs to be refreshed on a timer or
@@ -1354,15 +1355,30 @@ class API(base.Base):
                         "destination_type 'volume' need to have a non-zero "
                         "size specified"))
             elif volume_id is not None:
+                min_compute_version = objects.Service.get_minimum_version(
+                    context, 'nova-compute')
                 try:
                     volume = self.volume_api.get(context, volume_id)
-                    self.volume_api.check_attach(context,
-                                                 volume,
-                                                 instance=instance)
+                    # NOTE(ildikov): The boot from volume operation did not
+                    # reserve the volume before Pike and as the older computes
+                    # are running 'check_attach' which will fail if the volume
+                    # is in 'attaching' state; if the compute service version
+                    # is not high enough we will just perform the old check as
+                    # opposed to reserving the volume here.
+                    if (min_compute_version >=
+                            BFV_RESERVE_MIN_COMPUTE_VERSION):
+                        volume = self._check_attach_and_reserve_volume(
+                            context, volume_id, instance)
+                    else:
+                        # NOTE(ildikov): This call is here only for backward
+                        # compatibility and can be removed after Ocata EOL.
+                        volume = self._check_attach(context, volume_id,
+                                                    instance)
                     bdm.volume_size = volume.get('size')
                 except (exception.CinderConnectionFailed,
                         exception.InvalidVolume):
                     raise
                 except exception.InvalidInput as exc:
                     raise exception.InvalidVolume(reason=exc.format_message())
                 except Exception:
                     raise exception.InvalidBDMVolume(id=volume_id)
             elif snapshot_id is not None:
@@ -1404,6 +1420,23 @@ class API(base.Base):
         if num_local > max_local:
             raise exception.InvalidBDMLocalsLimit()

+    def _check_attach(self, context, volume_id, instance):
+        # TODO(ildikov): This check_attach code is kept only for backward
+        # compatibility and should be removed after Ocata EOL.
+        volume = self.volume_api.get(context, volume_id)
+        if volume['status'] != 'available':
+            msg = _("volume '%(vol)s' status must be 'available'. Currently "
+                    "in '%(status)s'") % {'vol': volume['id'],
+                                          'status': volume['status']}
+            raise exception.InvalidVolume(reason=msg)
+        if volume['attach_status'] == 'attached':
+            msg = _("volume %s already attached") % volume['id']
+            raise exception.InvalidVolume(reason=msg)
+        self.volume_api.check_availability_zone(context, volume,
+                                                instance=instance)
+
+        return volume
+
     def _populate_instance_names(self, instance, num_instances):
         """Populate instance display_name and hostname."""
         display_name = instance.get('display_name')
@@ -3551,6 +3584,8 @@ class API(base.Base):
                                                 instance=instance)
+        self.volume_api.reserve_volume(context, volume_id)
+
         return volume

     def _attach_volume(self, context, instance, volume_id, device,
                        disk_bus, device_type):
         """Attach an existing volume to an existing instance.
@@ -3689,7 +3724,8 @@ class API(base.Base):
             msg = _("New volume must be the same size or larger.")
             raise exception.InvalidVolume(reason=msg)
         self.volume_api.check_detach(context, old_volume)
-        self.volume_api.check_attach(context, new_volume, instance=instance)
+        self.volume_api.check_availability_zone(context, new_volume,
+                                                instance=instance)
         self.volume_api.begin_detaching(context, old_volume['id'])
         self.volume_api.reserve_volume(context, new_volume['id'])
         try:
@@ -1572,8 +1572,7 @@ class ComputeManager(manager.Manager):
             bdm.update(values)
             bdm.save()

-    def _prep_block_device(self, context, instance, bdms,
-                           do_check_attach=True):
+    def _prep_block_device(self, context, instance, bdms):
         """Set up the block device for an instance with error logging."""
         try:
             self._add_missing_dev_names(bdms, instance)
@@ -1581,7 +1580,6 @@ class ComputeManager(manager.Manager):
             mapping = driver.block_device_info_get_mapping(block_device_info)
             driver_block_device.attach_block_devices(
                 mapping, context, instance, self.volume_api, self.driver,
-                do_check_attach=do_check_attach,
                 wait_func=self._await_block_device_map_created)

             self._block_device_info_to_legacy(block_device_info)
@@ -4431,8 +4429,7 @@ class ComputeManager(manager.Manager):

         bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
             context, instance.uuid)
-        block_device_info = self._prep_block_device(context, instance, bdms,
-                                                    do_check_attach=False)
+        block_device_info = self._prep_block_device(context, instance, bdms)
         scrubbed_keys = self._unshelve_instance_key_scrub(instance)

         if node is None:
@@ -4786,7 +4783,7 @@ class ComputeManager(manager.Manager):
                      instance=instance)
         try:
             bdm.attach(context, instance, self.volume_api, self.driver,
-                       do_check_attach=False, do_driver_attach=True)
+                       do_driver_attach=True)
         except Exception:
             with excutils.save_and_reraise_exception():
                 LOG.exception(_LE("Failed to attach %(volume_id)s "
@@ -30,7 +30,7 @@ LOG = logging.getLogger(__name__)


 # NOTE(danms): This is the global service version counter
-SERVICE_VERSION = 16
+SERVICE_VERSION = 17


 # NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
@@ -95,6 +95,11 @@ SERVICE_VERSION_HISTORY = (
     # Version 16: Indicate that nova-compute will refuse to start if it doesn't
     # have a placement section configured.
     {'compute_rpc': '4.13'},
+    # Version 17: Add 'reserve_volume' to the boot from volume flow and
+    # remove 'check_attach'. The service version bump is needed to fall back to
+    # the old check in the API as the old computes fail if the volume is moved
+    # to 'attaching' state by reserve.
+    {'compute_rpc': '4.13'},
 )
@@ -372,6 +372,8 @@ class ComputeVolumeTestCase(BaseTestCase):
                       lambda *a, **kw: None)
         self.stub_out('nova.volume.cinder.API.detach',
                       lambda *a, **kw: None)
+        self.stub_out('nova.volume.cinder.API.check_availability_zone',
+                      lambda *a, **kw: None)
         self.stub_out('eventlet.greenthread.sleep',
                       lambda *a, **kw: None)
@@ -864,17 +866,24 @@ class ComputeVolumeTestCase(BaseTestCase):
         for expected, got in zip(expected_result, preped_bdm):
             self.assertThat(expected, matchers.IsSubDictOf(got))

-    def test_validate_bdm(self):
+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
+    def test_validate_bdm(self, mock_get_min_ver):
         def fake_get(self, context, res_id):
             return {'id': res_id, 'size': 4}

-        def fake_check_attach(*args, **kwargs):
+        def fake_check_availability_zone(*args, **kwargs):
             pass

+        def fake_reserve_volume(*args, **kwargs):
+            pass
+
         self.stub_out('nova.volume.cinder.API.get', fake_get)
         self.stub_out('nova.volume.cinder.API.get_snapshot', fake_get)
-        self.stub_out('nova.volume.cinder.API.check_attach',
-                      fake_check_attach)
+        self.stub_out('nova.volume.cinder.API.check_availability_zone',
+                      fake_check_availability_zone)
+        self.stub_out('nova.volume.cinder.API.reserve_volume',
+                      fake_reserve_volume)

         volume_id = '55555555-aaaa-bbbb-cccc-555555555555'
         snapshot_id = '66666666-aaaa-bbbb-cccc-555555555555'
@@ -1085,13 +1094,52 @@ class ComputeVolumeTestCase(BaseTestCase):
                           self.context, self.instance,
                           instance_type, all_mappings)

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=16)
+    @mock.patch.object(nova.volume.cinder.API, 'get')
+    @mock.patch.object(cinder.API, 'check_availability_zone')
+    def test_validate_bdm_volume_old_compute(self, mock_check_av_zone,
+                                             mock_get, mock_get_min_ver):
+        instance = self._create_fake_instance_obj()
+        instance_type = {'ephemeral_gb': 2}
+        volume_id = uuids.volume_id
+        mappings = [
+            fake_block_device.FakeDbBlockDeviceDict({
+                'device_name': '/dev/sda1',
+                'source_type': 'volume',
+                'destination_type': 'volume',
+                'device_type': 'disk',
+                'volume_id': volume_id,
+                'guest_format': None,
+                'boot_index': 0,
+            }, anon=True)
+        ]
+        mappings = block_device_obj.block_device_make_list_from_dicts(
+            self.context, mappings)
+
+        volume = {'id': volume_id,
+                  'size': 4,
+                  'status': 'available',
+                  'attach_status': 'detached',
+                  'multiattach': False}
+
+        mock_get.return_value = volume
+        self.compute_api._validate_bdm(self.context, instance,
+                                       instance_type, mappings)
+        self.assertEqual(4, mappings[0].volume_size)
+        mock_check_av_zone.assert_called_once_with(self.context, volume,
+                                                   instance=instance)
+
+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get')
     @mock.patch.object(cinder.API, 'check_availability_zone')
     @mock.patch.object(cinder.API, 'reserve_volume',
                        side_effect=exception.InvalidVolume(reason='error'))
     def test_validate_bdm_media_service_invalid_volume(self, mock_reserve_vol,
                                                        mock_check_av_zone,
-                                                       mock_get):
+                                                       mock_get,
+                                                       mock_get_min_ver):
         volume_id = uuids.volume_id
         instance_type = {'swap': 1, 'ephemeral_gb': 1}
         bdms = [fake_block_device.FakeDbBlockDeviceDict({
@@ -1137,8 +1185,11 @@ class ComputeVolumeTestCase(BaseTestCase):
                           instance_type, bdms)
         mock_get.assert_called_with(self.context, volume_id)

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get')
-    def test_validate_bdm_media_service_volume_not_found(self, mock_get):
+    def test_validate_bdm_media_service_volume_not_found(self, mock_get,
+                                                         mock_get_min_ver):
         volume_id = uuids.volume_id
         instance_type = {'swap': 1, 'ephemeral_gb': 1}
         bdms = [fake_block_device.FakeDbBlockDeviceDict({
@@ -1160,10 +1211,15 @@ class ComputeVolumeTestCase(BaseTestCase):
                           self.context, self.instance,
                           instance_type, bdms)

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get')
     @mock.patch.object(cinder.API, 'check_availability_zone')
-    def test_validate_bdm_media_service_valid(self, mock_check_av_zone,
-                                              mock_get):
+    @mock.patch.object(cinder.API, 'reserve_volume')
+    def test_validate_bdm_media_service_valid(self, mock_reserve_vol,
+                                              mock_check_av_zone,
+                                              mock_get,
+                                              mock_get_min_ver):
         volume_id = uuids.volume_id
         instance_type = {'swap': 1, 'ephemeral_gb': 1}
         bdms = [fake_block_device.FakeDbBlockDeviceDict({
@@ -1188,7 +1244,8 @@ class ComputeVolumeTestCase(BaseTestCase):
                                        instance_type, bdms)
         mock_get.assert_called_once_with(self.context, volume_id)
         mock_check_av_zone.assert_called_once_with(self.context, volume,
-                                                   self.instance)
+                                                   instance=self.instance)
+        mock_reserve_vol.assert_called_once_with(self.context, volume_id)

     def test_volume_snapshot_create(self):
         self.assertRaises(messaging.ExpectedException,
@@ -1926,7 +1983,7 @@ class ComputeTestCase(BaseTestCase):
         LOG.info("Running instances: %s", instances)
         self.assertEqual(len(instances), 1)

-        def fake_check_attach(*args, **kwargs):
+        def fake_check_availability_zone(*args, **kwargs):
             pass

         def fake_reserve_volume(*args, **kwargs):
@@ -1963,7 +2020,8 @@ class ComputeTestCase(BaseTestCase):
             return bdm

         self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
-        self.stub_out('nova.volume.cinder.API.check_attach', fake_check_attach)
+        self.stub_out('nova.volume.cinder.API.check_availability_zone',
+                      fake_check_availability_zone)
         self.stub_out('nova.volume.cinder.API.reserve_volume',
                       fake_reserve_volume)
         self.stub_out('nova.volume.cinder.API.terminate_connection',
@@ -4653,10 +4711,11 @@ class ComputeTestCase(BaseTestCase):
             return volume
         self.stub_out('nova.volume.cinder.API.get', fake_volume_get)

-        def fake_volume_check_attach(self, context, volume_id, instance):
+        def fake_volume_check_availability_zone(self, context,
+                                                volume_id, instance):
             pass
-        self.stub_out('nova.volume.cinder.API.check_attach',
-                      fake_volume_check_attach)
+        self.stub_out('nova.volume.cinder.API.check_availability_zone',
+                      fake_volume_check_availability_zone)

         def fake_get_volume_encryption_metadata(self, context, volume_id):
             return {}
@@ -9304,7 +9363,7 @@ class ComputeAPITestCase(BaseTestCase):
             return {'id': volume_id}

         self.stub_out('nova.volume.cinder.API.get', fake_volume_get)
-        self.stub_out('nova.volume.cinder.API.check_attach', fake)
+        self.stub_out('nova.volume.cinder.API.check_availability_zone', fake)
         self.stub_out('nova.volume.cinder.API.reserve_volume', fake)

         instance = fake_instance.fake_instance_obj(None, **{
@@ -3308,12 +3308,15 @@ class _ComputeAPIUnitTestMixIn(object):
             self.context,
             bdms, legacy_bdm=True)

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get')
-    @mock.patch.object(cinder.API, 'check_attach',
-                       side_effect=exception.InvalidVolume(reason='error'))
-    def test_validate_bdm_with_error_volume(self, mock_check_attach, mock_get):
-        # Tests that an InvalidVolume exception raised from
-        # volume_api.check_attach due to the volume status not being
+    @mock.patch.object(cinder.API, 'reserve_volume',
+                       side_effect=exception.InvalidInput(reason='error'))
+    def test_validate_bdm_with_error_volume(self, mock_reserve_volume,
+                                            mock_get, mock_get_min_ver):
+        # Tests that an InvalidInput exception raised from
+        # volume_api.reserve_volume due to the volume status not being
         # 'available' results in _validate_bdm re-raising InvalidVolume.
         instance = self._create_instance_obj()
         instance_type = self._create_flavor()
@@ -3338,14 +3341,17 @@ class _ComputeAPIUnitTestMixIn(object):
                           instance, instance_type, bdms)

         mock_get.assert_called_once_with(self.context, volume_id)
-        mock_check_attach.assert_called_once_with(
-            self.context, volume_info, instance=instance)
+        mock_reserve_volume.assert_called_once_with(
+            self.context, volume_id)

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get_snapshot',
                        side_effect=exception.CinderConnectionFailed(reason='error'))
     @mock.patch.object(cinder.API, 'get',
                        side_effect=exception.CinderConnectionFailed(reason='error'))
-    def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot):
+    def test_validate_bdm_with_cinder_down(self, mock_get, mock_get_snapshot,
+                                           mock_get_min_ver):
         instance = self._create_instance_obj()
         instance_type = self._create_flavor()
         bdm = [objects.BlockDeviceMapping(
@@ -3445,16 +3451,26 @@ class _ComputeAPIUnitTestMixIn(object):

         do_test()

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get',
                        side_effect=exception.CinderConnectionFailed(reason='error'))
-    def test_provision_instances_with_cinder_down(self, mock_get):
+    def test_provision_instances_with_cinder_down(self, mock_get,
+                                                  mock_get_min_ver):
         self._test_provision_instances_with_cinder_error(
             expected_exception=exception.CinderConnectionFailed)

-    @mock.patch.object(cinder.API, 'get',
-                       return_value={'id': 1, 'status': 'error',
-                                     'attach_status': 'detached'})
-    def test_provision_instances_with_error_volume(self, mock_get):
+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
+    @mock.patch.object(cinder.API, 'get')
+    @mock.patch.object(cinder.API, 'check_availability_zone')
+    @mock.patch.object(cinder.API, 'reserve_volume',
+                       side_effect=exception.InvalidInput(reason='error'))
+    def test_provision_instances_with_error_volume(self,
+                                                   mock_cinder_check_av_zone,
+                                                   mock_reserve_volume,
+                                                   mock_get,
+                                                   mock_get_min_ver):
         self._test_provision_instances_with_cinder_error(
             expected_exception=exception.InvalidVolume)

@@ -3505,9 +3521,12 @@ class _ComputeAPIUnitTestMixIn(object):
         @mock.patch.object(objects.RequestSpec, 'from_components')
         @mock.patch.object(objects.BuildRequest, 'create')
         @mock.patch.object(objects.InstanceMapping, 'create')
-        def do_test(_mock_inst_mapping_create, mock_build_req,
-                    mock_req_spec_from_components, _mock_ensure_default,
-                    mock_check_num_inst_quota, mock_volume, mock_inst_create):
+        @mock.patch.object(objects.Service, 'get_minimum_version',
+                           return_value=17)
+        def do_test(mock_get_min_ver, _mock_inst_mapping_create,
+                    mock_build_req, mock_req_spec_from_components,
+                    _mock_ensure_default, mock_check_num_inst_quota,
+                    mock_volume, mock_inst_create):

             min_count = 1
             max_count = 2
@@ -3651,11 +3670,16 @@ class _ComputeAPIUnitTestMixIn(object):
             self.assertEqual(ctxt.project_id, inst_mapping_mock.project_id)
         do_test()

+    @mock.patch.object(objects.Service, 'get_minimum_version',
+                       return_value=17)
     @mock.patch.object(cinder.API, 'get')
-    @mock.patch.object(cinder.API, 'check_attach',
-                       side_effect=(None, exception.InvalidVolume(reason='error')))
+    @mock.patch.object(cinder.API, 'check_availability_zone',)
+    @mock.patch.object(cinder.API, 'reserve_volume',
+                       side_effect=(None, exception.InvalidInput(reason='error')))
     def test_provision_instances_cleans_up_when_volume_invalid(self,
-            _mock_cinder_get, _mock_cinder_check_attach):
+            _mock_cinder_reserve_volume,
+            _mock_cinder_check_availability_zone, _mock_cinder_get,
+            _mock_get_min_ver):
         @mock.patch.object(self.compute_api, '_check_num_instances_quota')
         @mock.patch.object(objects, 'Instance')
         @mock.patch.object(self.compute_api.security_group_api,
@@ -3106,7 +3106,7 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase):
         self.assertTrue(mock_power_off.called)
         self.assertFalse(mock_destroy.called)

-        def _attach(context, instance, bdms, do_check_attach=True):
+        def _attach(context, instance, bdms):
             return {'block_device_mapping': 'shared_block_storage'}

         def _spawn(context, instance, image_meta, injected_files,
@@ -285,7 +285,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
         self.compute._notify_about_instance_usage(self.context, instance,
                                                   'unshelve.start')
         self.compute._prep_block_device(self.context, instance,
-                mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
+                mox.IgnoreArg()).AndReturn('fake_bdm')
         self.compute.network_api.setup_instance_network_on_host(
                 self.context, instance, self.compute.host)
         self.compute.driver.spawn(self.context, instance,
@@ -367,7 +367,7 @@ class ShelveComputeManagerTestCase(test_compute.BaseTestCase):
                 'unshelve.start')

         self.compute._prep_block_device(self.context, instance,
-                mox.IgnoreArg(), do_check_attach=False).AndReturn('fake_bdm')
+                mox.IgnoreArg()).AndReturn('fake_bdm')
         self.compute.network_api.setup_instance_network_on_host(
                 self.context, instance, self.compute.host)
         self.rt.instance_claim(self.context, instance, node, limits).AndReturn(
@@ -179,13 +179,7 @@ class API(object):
         self.volume_list = [v for v in self.volume_list
                             if v['id'] != volume_id]

-    def check_attach(self, context, volume, instance=None):
-        if volume['status'] != 'available':
-            msg = "Status of volume '%s' must be available" % volume
-            raise exception.InvalidVolume(reason=msg)
-        if volume['attach_status'] == 'attached':
-            msg = "already attached"
-            raise exception.InvalidVolume(reason=msg)
     def check_availability_zone(self, context, volume, instance=None):
         if instance and not CONF.cinder.cross_az_attach:
             if instance['availability_zone'] != volume['availability_zone']:
                 msg = "Instance and volume not in same availability_zone"
@@ -384,11 +384,10 @@ class TestDriverBlockDevice(test.NoDBTestCase):
         self._test_call_wait_func(False)

     def _test_volume_attach(self, driver_bdm, bdm_dict,
-                            fake_volume, check_attach=True,
-                            fail_check_attach=False, driver_attach=False,
-                            fail_driver_attach=False, volume_attach=True,
-                            fail_volume_attach=False, access_mode='rw',
-                            availability_zone=None):
+                            fake_volume, fail_check_av_zone=False,
+                            driver_attach=False, fail_driver_attach=False,
+                            volume_attach=True, fail_volume_attach=False,
+                            access_mode='rw', availability_zone=None):
         elevated_context = self.context.elevated()
         self.stubs.Set(self.context, 'elevated',
                        lambda: elevated_context)
@@ -406,16 +405,17 @@ class TestDriverBlockDevice(test.NoDBTestCase):

         self.volume_api.get(self.context,
                             fake_volume['id']).AndReturn(fake_volume)
-        if check_attach:
-            if not fail_check_attach:
-                self.volume_api.check_attach(self.context, fake_volume,
-                                             instance=instance).AndReturn(None)
-            else:
-                self.volume_api.check_attach(self.context, fake_volume,
-                                             instance=instance).AndRaise(
-                                                 test.TestingException)
-                driver_bdm._bdm_obj.save().AndReturn(None)
-                return instance, expected_conn_info
+        if not fail_check_av_zone:
+            self.volume_api.check_availability_zone(self.context,
+                                                    fake_volume,
+                                                    instance=instance).AndReturn(None)
+        else:
+            self.volume_api.check_availability_zone(self.context,
+                                                    fake_volume,
+                                                    instance=instance).AndRaise(
+                                                        test.TestingException)
+            driver_bdm._bdm_obj.save().AndReturn(None)
+            return instance, expected_conn_info

         self.virt_driver.get_volume_connector(instance).AndReturn(connector)
         self.volume_api.initialize_connection(
@@ -518,13 +518,13 @@ class TestDriverBlockDevice(test.NoDBTestCase):
         self.assertEqual(expected_conn_info, test_bdm['connection_info'])
         self.assertEqual(42, test_bdm.volume_size)

-    def test_volume_attach_check_attach_fails(self):
+    def test_volume_attach_check_av_zone_fails(self):
         test_bdm = self.driver_classes['volume'](
             self.volume_bdm)
         volume = {'id': 'fake-volume-id-1'}

         instance, _ = self._test_volume_attach(
-            test_bdm, self.volume_bdm, volume, fail_check_attach=True)
+            test_bdm, self.volume_bdm, volume, fail_check_av_zone=True)
         self.mox.ReplayAll()

         self.assertRaises(test.TestingException, test_bdm.attach, self.context,
@@ -537,14 +537,13 @@ class TestDriverBlockDevice(test.NoDBTestCase):
                   'attach_status': 'detached'}

         instance, expected_conn_info = self._test_volume_attach(
-            test_bdm, self.volume_bdm, volume, check_attach=False,
-            driver_attach=False)
+            test_bdm, self.volume_bdm, volume, driver_attach=False)

         self.mox.ReplayAll()

         test_bdm.attach(self.context, instance,
                         self.volume_api, self.virt_driver,
-                        do_check_attach=False, do_driver_attach=False)
+                        do_driver_attach=False)
         self.assertThat(test_bdm['connection_info'],
                         matchers.DictMatches(expected_conn_info))
@@ -555,14 +554,13 @@ class TestDriverBlockDevice(test.NoDBTestCase):
                   'attach_status': 'detached'}

         instance, expected_conn_info = self._test_volume_attach(
-            test_bdm, self.volume_bdm, volume, check_attach=False,
-            driver_attach=True)
+            test_bdm, self.volume_bdm, volume, driver_attach=True)

         self.mox.ReplayAll()

         test_bdm.attach(self.context, instance,
                         self.volume_api, self.virt_driver,
-                        do_check_attach=False, do_driver_attach=True)
+                        do_driver_attach=True)
         self.assertThat(test_bdm['connection_info'],
                         matchers.DictMatches(expected_conn_info))
@@ -745,8 +743,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
         self.mox.StubOutWithMock(self.volume_api, 'create')

         volume_class.attach(self.context, instance, self.volume_api,
-                            self.virt_driver, do_check_attach=True
-                            ).AndReturn(None)
+                            self.virt_driver).AndReturn(None)
         self.mox.ReplayAll()

         test_bdm.attach(self.context, instance, self.volume_api,
@@ -854,8 +851,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
         self.mox.StubOutWithMock(self.volume_api, 'create')

         volume_class.attach(self.context, instance, self.volume_api,
-                            self.virt_driver, do_check_attach=True
-                            ).AndReturn(None)
+                            self.virt_driver).AndReturn(None)
         self.mox.ReplayAll()

         test_bdm.attach(self.context, instance, self.volume_api,
@@ -922,8 +918,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
                                            '', availability_zone=None)
             vol_attach.assert_called_once_with(self.context, instance,
                                                self.volume_api,
-                                               self.virt_driver,
-                                               do_check_attach=True)
+                                               self.virt_driver)
             self.assertEqual('fake-volume-id-2', test_bdm.volume_id)

     def test_blank_attach_volume_cinder_cross_az_attach_false(self):
@@ -954,8 +949,7 @@ class TestDriverBlockDevice(test.NoDBTestCase):
                                            '', availability_zone='test-az')
             vol_attach.assert_called_once_with(self.context, instance,
                                                self.volume_api,
-                                               self.virt_driver,
-                                               do_check_attach=True)
+                                               self.virt_driver)
             self.assertEqual('fake-volume-id-2', test_bdm.volume_id)

     def test_convert_block_devices(self):
@@ -181,17 +181,6 @@ class CinderApiTestCase(test.NoDBTestCase):
         mock_volumes.list.assert_called_once_with(detailed=True,
                                                   search_opts={'id': 'id1'})

-    def test_check_attach_volume_status_error(self):
-        volume = {'id': 'fake', 'status': 'error'}
-        self.assertRaises(exception.InvalidVolume,
-                          self.api.check_attach, self.ctx, volume)
-
-    def test_check_attach_volume_already_attached(self):
-        volume = {'id': 'fake', 'status': 'available'}
-        volume['attach_status'] = "attached"
-        self.assertRaises(exception.InvalidVolume,
-                          self.api.check_attach, self.ctx, volume)
-
     @mock.patch.object(cinder.az, 'get_instance_availability_zone',
                        return_value='zone1')
     def test_check_availability_zone_differs(self, mock_get_instance_az):
@@ -207,21 +196,6 @@ class CinderApiTestCase(test.NoDBTestCase):
                           self.ctx, volume, instance)
         mock_get_instance_az.assert_called_once_with(self.ctx, instance)

-    def test_check_attach(self):
-        volume = {'status': 'available'}
-        volume['attach_status'] = "detached"
-        volume['availability_zone'] = 'zone1'
-        volume['multiattach'] = False
-        instance = {'availability_zone': 'zone1', 'host': 'fakehost'}
-        CONF.set_override('cross_az_attach', False, group='cinder')
-
-        with mock.patch.object(cinder.az, 'get_instance_availability_zone',
-                               side_effect=lambda context, instance: 'zone1'):
-            self.assertIsNone(self.api.check_attach(
-                self.ctx, volume, instance))
-
-        CONF.reset()
-
     def test_check_detach(self):
         volume = {'id': 'fake', 'status': 'in-use',
                   'attach_status': 'attached',
@@ -244,10 +244,10 @@ class DriverVolumeBlockDevice(DriverBlockDevice):

     @update_db
     def attach(self, context, instance, volume_api, virt_driver,
-               do_check_attach=True, do_driver_attach=False, **kwargs):
+               do_driver_attach=False, **kwargs):
         volume = volume_api.get(context, self.volume_id)
-        if do_check_attach:
-            volume_api.check_attach(context, volume, instance=instance)
+        volume_api.check_availability_zone(context, volume,
+                                           instance=instance)

         volume_id = volume['id']
         context = context.elevated()
@@ -367,7 +367,7 @@ class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):
     _proxy_as_attr = set(['volume_size', 'volume_id', 'snapshot_id'])

     def attach(self, context, instance, volume_api,
-               virt_driver, wait_func=None, do_check_attach=True):
+               virt_driver, wait_func=None):

         if not self.volume_id:
             av_zone = _get_volume_create_az_value(instance)
@@ -382,8 +382,7 @@ class DriverSnapshotBlockDevice(DriverVolumeBlockDevice):

         # Call the volume attach now
         super(DriverSnapshotBlockDevice, self).attach(
-            context, instance, volume_api, virt_driver,
-            do_check_attach=do_check_attach)
+            context, instance, volume_api, virt_driver)


 class DriverImageBlockDevice(DriverVolumeBlockDevice):
@@ -392,7 +391,7 @@ class DriverImageBlockDevice(DriverVolumeBlockDevice):
     _proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])

     def attach(self, context, instance, volume_api,
-               virt_driver, wait_func=None, do_check_attach=True):
+               virt_driver, wait_func=None):
         if not self.volume_id:
             av_zone = _get_volume_create_az_value(instance)
             vol = volume_api.create(context, self.volume_size,
@@ -404,8 +403,7 @@ class DriverImageBlockDevice(DriverVolumeBlockDevice):
         self.volume_id = vol['id']

         super(DriverImageBlockDevice, self).attach(
-            context, instance, volume_api, virt_driver,
-            do_check_attach=do_check_attach)
+            context, instance, volume_api, virt_driver)


 class DriverBlankBlockDevice(DriverVolumeBlockDevice):
@@ -414,7 +412,7 @@ class DriverBlankBlockDevice(DriverVolumeBlockDevice):
     _proxy_as_attr = set(['volume_size', 'volume_id', 'image_id'])

     def attach(self, context, instance, volume_api,
-               virt_driver, wait_func=None, do_check_attach=True):
+               virt_driver, wait_func=None):
         if not self.volume_id:
             vol_name = instance.uuid + '-blank-vol'
             av_zone = _get_volume_create_az_value(instance)
@@ -426,8 +424,7 @@ class DriverBlankBlockDevice(DriverVolumeBlockDevice):
         self.volume_id = vol['id']

         super(DriverBlankBlockDevice, self).attach(
-            context, instance, volume_api, virt_driver,
-            do_check_attach=do_check_attach)
+            context, instance, volume_api, virt_driver)


 def _convert_block_devices(device_type, block_device_mapping):
@@ -254,19 +254,6 @@ class API(object):
                    "status": volume['status']}
             raise exception.InvalidVolume(reason=msg)

-    def check_attach(self, context, volume, instance=None):
-        # TODO(vish): abstract status checking?
-        if volume['status'] != "available":
-            msg = _("volume '%(vol)s' status must be 'available'. Currently "
-                    "in '%(status)s'") % {'vol': volume['id'],
-                                          'status': volume['status']}
-            raise exception.InvalidVolume(reason=msg)
-        if volume['attach_status'] == "attached":
-            msg = _("volume %s already attached") % volume['id']
-            raise exception.InvalidVolume(reason=msg)
-
-        self.check_availability_zone(context, volume, instance)
-
     def check_availability_zone(self, context, volume, instance=None):
         """Ensure that the availability zone is the same."""
releasenotes/notes/remove-check-attach-68b9ec781ad184ff.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
+---
+fixes:
+  - |
+    Fixes `bug 1581230`_ by removing the internal ``check_attach`` call from
+    the Nova code, as it can cause race conditions and the checks are handled
+    by ``reserve_volume`` in Cinder. ``reserve_volume`` is called in every
+    volume attach scenario to provide the necessary checks and volume state
+    validation on the Cinder side.
+other:
+  - |
+    By removing the ``check_attach`` internal call from Nova, small
+    behavioral changes were introduced.
+
+    A ``reserve_volume`` call was added to the boot from volume scenario. In
+    case a failure occurs while building the instance, the instance goes into
+    ERROR state while the volume stays in ``attaching`` state. The volume
+    state will be set back to ``available`` when the instance gets deleted.
+
+    An additional availability zone check was added to the volume attach
+    flow, which means an availability zone check is now performed when an
+    instance gets unshelved. In case the deployment is not sensitive to
+    availability zones and does not use the AvailabilityZoneFilter scheduler
+    filter, the current default setting (``cross_az_attach=True``) allows
+    unshelve to work the same way as before this change, without additional
+    configuration.
+
+.. _`bug 1581230`: https://bugs.launchpad.net/nova/+bug/1581230
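
The availability zone behavior described in the release note is controlled
by the cross_az_attach option in the [cinder] section of nova.conf. A
minimal illustrative fragment (the value shown is the default):

    [cinder]
    # When True (the default), an instance may attach a volume from a
    # different availability zone. Leave this True if the deployment does
    # not keep Nova and Cinder AZs aligned; otherwise the new AZ check can
    # fail volume attach and unshelve operations.
    cross_az_attach = True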