Fix EC2 volume attachment state at the attaching stage.

While a volume is still attaching or detaching, AWS reports the
'attaching'/'detaching' state in the volume attachment info and in the
instance block device mapping. Nova's EC2 layer, however, reports None,
'attached', or 'detached'.

This fix sets the 'attaching'/'detaching' state in the output where
possible, while the corresponding operation is still in progress.

Change-Id: I61ef5afb2c3dea2a931dbf18a8a428460ce9cc2e
Closes-Bug: #1355285
Feodor Tersin 2014-08-13 10:02:10 +04:00
parent 795091ecec
commit c1f9ab8d3d
2 changed files with 71 additions and 11 deletions
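
Roughly, the change makes the EC2 layer report the volume's transient Cinder
status ('attaching'/'detaching') as the attachment status instead of the final
attach_status. Below is a minimal standalone sketch of that mapping; the helper
mirrors the new _get_volume_attach_status method from the diff, but the example
volume dicts are invented for illustration and are not real Cinder data.

    def get_volume_attach_status(volume):
        # Mirrors CloudController._get_volume_attach_status introduced below:
        # prefer the transient status while an (de)attach is in progress,
        # otherwise fall back to the plain attach_status.
        return (volume['status']
                if volume['status'] in ('attaching', 'detaching') else
                volume['attach_status'])

    print(get_volume_attach_status({'status': 'attaching',
                                    'attach_status': 'detached'}))  # attaching
    print(get_volume_attach_status({'status': 'in-use',
                                    'attach_status': 'attached'}))  # attached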


@@ -825,12 +825,13 @@ class CloudController(object):
         v['size'] = volume['size']
         v['availabilityZone'] = volume['availability_zone']
         v['createTime'] = volume['created_at']
-        if volume['attach_status'] == 'attached':
-            v['attachmentSet'] = [{'attachTime': volume['attach_time'],
+        if v['status'] == 'in-use':
+            v['attachmentSet'] = [{'attachTime': volume.get('attach_time'),
                                    'deleteOnTermination': False,
                                    'device': volume['mountpoint'],
                                    'instanceId': instance_ec2_id,
-                                   'status': 'attached',
+                                   'status': self._get_volume_attach_status(
+                                       volume),
                                    'volumeId': v['volumeId']}]
         else:
             v['attachmentSet'] = [{}]
@@ -1113,7 +1114,7 @@ class CloudController(object):
             ebs = {'volumeId': ec2utils.id_to_ec2_vol_id(volume_id),
                    'deleteOnTermination': bdm.delete_on_termination,
                    'attachTime': vol['attach_time'] or '',
-                   'status': vol['attach_status'], }
+                   'status': self._get_volume_attach_status(vol), }
             res = {'deviceName': bdm.device_name,
                    'ebs': ebs, }
             mapping.append(res)
@@ -1122,6 +1123,12 @@ class CloudController(object):
         result['blockDeviceMapping'] = mapping
         result['rootDeviceType'] = root_device_type

+    @staticmethod
+    def _get_volume_attach_status(volume):
+        return (volume['status']
+                if volume['status'] in ('attaching', 'detaching') else
+                volume['attach_status'])
+
     @staticmethod
     def _format_instance_root_device_name(instance, result):
         result['rootDeviceName'] = (instance.get('root_device_name') or


@@ -36,6 +36,7 @@ from nova.tests.unit import cast_as_call
 from nova.tests.unit import fake_network
 from nova.tests.unit import fake_notifier
 from nova.tests.unit import fake_utils
+from nova.tests.unit import fake_volume
 from nova.tests.unit.image import fake
 from nova.tests.unit import matchers
 from nova import volume
@@ -286,13 +287,24 @@ class CinderCloudTestCase(test.TestCase):
                   'instance_type': CONF.default_flavor,
                   'max_count': 1}
         ec2_instance_id = self._run_instance(**kwargs)
+        # NOTE(ft): Since fake attach action is very fast, we replace it to
+        # empty function to check EC2 API results at 'attaching' stage
+        self.stubs.Set(fake_volume.API, 'attach',
+                       lambda *args, **kwargs: None)
         resp = self.cloud.attach_volume(self.context,
                                         vol1['volumeId'],
                                         ec2_instance_id,
                                         '/dev/sde')
-        # Here,the status should be 'attaching',but it can be 'attached' in
-        # unittest scenario if the attach action is very fast.
-        self.assertIn(resp['status'], ('attaching', 'attached'))
+        self.assertEqual('attaching', resp['status'])
+        resp = self.cloud.describe_volumes(self.context, [vol1['volumeId']])
+        volume = resp['volumeSet'][0]
+        self.assertEqual('in-use', volume['status'])
+        self.assertThat({'status': 'attaching',
+                         'volumeId': vol1['volumeId']},
+                        matchers.IsSubDictOf(volume['attachmentSet'][0]))

     def test_volume_status_of_detaching_volume(self):
         """Test the volume's status in response when detaching a volume."""
@@ -308,13 +320,25 @@ class CinderCloudTestCase(test.TestCase):
                   'block_device_mapping': [{'device_name': '/dev/sdb',
                                             'volume_id': vol1_uuid,
                                             'delete_on_termination': True}]}
-        self._run_instance(**kwargs)
+        ec2_instance_id = self._run_instance(**kwargs)
+        # NOTE(ft): Since fake detach action is very fast, we replace it to
+        # empty function to check EC2 API results at 'detaching' stage
+        self.stubs.Set(fake_volume.API, 'detach',
+                       lambda *args, **kwargs: None)
         resp = self.cloud.detach_volume(self.context,
                                         vol1['volumeId'])
-        # Here,the status should be 'detaching',but it can be 'detached' in
-        # unittest scenario if the detach action is very fast.
-        self.assertIn(resp['status'], ('detaching', 'detached'))
+        self.assertEqual('detaching', resp['status'])
+        resp = self.cloud.describe_volumes(self.context, [vol1['volumeId']])
+        volume = resp['volumeSet'][0]
+        self.assertEqual('in-use', volume['status'])
+        self.assertThat({'status': 'detaching',
+                         'volumeId': vol1['volumeId'],
+                         'device': '/dev/sdb',
+                         'instanceId': ec2_instance_id},
+                        matchers.IsSubDictOf(volume['attachmentSet'][0]))

     def test_describe_snapshots(self):
         # Makes sure describe_snapshots works and filters results.
@@ -658,6 +682,35 @@ class CinderCloudTestCase(test.TestCase):
         self._tearDownBlockDeviceMapping(instances, volumes)

+    def test_format_instance_bdm_while_attaching_volume(self):
+        # NOTE(ft): Since instance bdm is deleted immediatly by detach
+        # operation call, no test for 'detaching' stage is required
+        vol = self.cloud.create_volume(self.context, size=1)
+        kwargs = {'image_id': 'ami-1',
+                  'instance_type': CONF.default_flavor,
+                  'max_count': 1}
+        ec2_instance_id = self._run_instance(**kwargs)
+        # NOTE(ft): Since fake attach action is very fast, we replace it to
+        # empty function to check EC2 API results at 'attaching' stage
+        self.stubs.Set(fake_volume.API, 'attach',
+                       lambda *args, **kwargs: None)
+        self.cloud.attach_volume(self.context, vol['volumeId'],
+                                 ec2_instance_id, '/dev/sde')
+        resp = self.cloud.describe_instances(self.context,
+                                             instance_id=['ami-1'])
+        resp = resp['reservationSet'][0]
+        self.assertEqual(1, len(resp['instancesSet']))
+        inst = resp['instancesSet'][0]
+        self.assertThat({'deviceName': '/dev/sde',
+                         'ebs': {'deleteOnTermination': False,
+                                 'status': 'attaching',
+                                 'volumeId': vol['volumeId']}},
+                        matchers.IsSubDictOf(inst['blockDeviceMapping'][0]))
+
     def _setUpImageSet(self, create_volumes_and_snapshots=False):
         self.flags(max_local_block_devices=-1)
         mappings1 = [