Handle new volume status "reserved"

Cinder introduced a new volume status, 'reserved', which means the volume
has been reserved for attaching. Because AWS has no such status, we convert
it to 'attaching'.

Change-Id: I84ee29eefc8a9db3982c61820e2cf72a56946132

parent e490186abd
commit fce377c0ba
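To illustrate the conversion in isolation, here is a minimal standalone sketch of the attach-status mapping this change implements. The FakeVolume stub and the final 'attached'/'detached' fallthrough are assumptions added for the example; only the 'reserved' -> 'attaching' handling and the 'attaching'/'detaching' pass-through come from the diff below.

class FakeVolume(object):
    """Stand-in for a Cinder volume object (assumption, not ec2-api code)."""
    def __init__(self, status, attachments=None):
        self.status = status
        self.attachments = attachments or []


def get_volume_attach_status(volume):
    """Map a Cinder volume status to an EC2-style attachment status."""
    if volume.status == 'reserved':
        # 'reserved' means the volume will be attached shortly
        return 'attaching'
    if volume.status in ('attaching', 'detaching'):
        return volume.status
    # Fallthrough is a simplification for this sketch
    return 'attached' if volume.attachments else 'detached'


assert get_volume_attach_status(FakeVolume('reserved')) == 'attaching'
assert get_volume_attach_status(FakeVolume('detaching')) == 'detaching'
assert get_volume_attach_status(FakeVolume('in-use', [{'id': 'a-1'}])) == 'attached'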
@@ -1579,6 +1579,9 @@ def _cloud_format_instance_bdm(context, os_instance, result,


 def _cloud_get_volume_attach_status(volume):
+    if volume.status == 'reserved':
+        # 'reserved' state means that volume will be attached later
+        return 'attaching'
     if volume.status in ('attaching', 'detaching'):
         return volume.status
     elif volume.attachments:
@@ -178,6 +178,7 @@ def describe_volumes(context, volume_id=None, filter=None,

 def _format_volume(context, volume, os_volume, instances={}, os_instances={},
                    snapshots={}, snapshot_id=None):
     valid_ec2_api_volume_status_map = {
+        'reserved': 'in-use',
         'attaching': 'in-use',
         'detaching': 'in-use'}
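For the volume-level status (as opposed to the attachment-level mapping sketched above), the new map entry means a 'reserved' Cinder volume is reported to EC2 clients as 'in-use'. A minimal sketch of the lookup; the pass-through fallback for unmapped statuses is an assumption for illustration:

valid_ec2_api_volume_status_map = {
    'reserved': 'in-use',
    'attaching': 'in-use',
    'detaching': 'in-use'}


def ec2_volume_status(os_status):
    # Statuses without an explicit mapping are passed through unchanged
    # (an assumption for this sketch).
    return valid_ec2_api_volume_status_map.get(os_status, os_status)


assert ec2_volume_status('reserved') == 'in-use'
assert ec2_volume_status('available') == 'available'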
@@ -214,11 +215,14 @@ def _format_attachment(context, volume, os_volume, instances={},
         instance = ec2utils.get_db_item_by_os_id(
             context, 'i', os_instance_id, instances)
         instance_id = instance['id']
+    status = os_volume.status
+    if status == 'reserved':
+        status = 'attaching'
     ec2_attachment = {
         'device': os_attachment.get('device'),
         'instanceId': instance_id,
-        'status': (os_volume.status
-                   if os_volume.status in ('attaching', 'detaching') else
+        'status': (status
+                   if status in ('attaching', 'detaching') else
                    'attached' if os_attachment else 'detached'),
         'volumeId': volume['id']}
     if os_instance_id in os_instances:
@@ -222,11 +222,6 @@ class VolumeTest(base.EC2TestCase):
     @decorators.idempotent_id('c4b470b7-0825-418f-bc76-533f84247878')
     @testtools.skipUnless(CONF.aws.ebs_image_id, "EBS image id is not defined")
     def test_attaching_stage(self):
-        clean_dict = {}
-        instance_id = self.run_instance(ImageId=CONF.aws.ebs_image_id,
-                                        clean_dict=clean_dict)
-        clean_i = clean_dict['instance']
-
         data = self.client.create_volume(
             AvailabilityZone=CONF.aws.aws_zone, Size=1)
         volume_id = data['VolumeId']
@@ -234,6 +229,11 @@ class VolumeTest(base.EC2TestCase):
                                           VolumeId=volume_id)
         self.get_volume_waiter().wait_available(volume_id)

+        clean_dict = {}
+        instance_id = self.run_instance(ImageId=CONF.aws.ebs_image_id,
+                                        clean_dict=clean_dict)
+        clean_i = clean_dict['instance']
+
         device_name = '/dev/xvdh'
         kwargs = {
             'Device': device_name,
@@ -241,8 +241,6 @@ class VolumeTest(base.EC2TestCase):
             'VolumeId': volume_id,
         }
         data = self.client.attach_volume(*[], **kwargs)
-        clean_vi = self.addResourceCleanUp(self.client.detach_volume,
-                                           VolumeId=volume_id)
         self.assertEqual('attaching', data['State'])

         if CONF.aws.run_incompatible_tests:
@@ -253,19 +251,12 @@ class VolumeTest(base.EC2TestCase):
         self.get_volume_attachment_waiter().wait_available(
             volume_id, final_set=('attached'))

-        # reorder cleanups to avoid error on volume delete
-        self.cancelResourceCleanUp(clean_i)
-        clean_i = self.addResourceCleanUp(self.client.terminate_instances,
-                                          InstanceIds=[instance_id])
-
         # stop instance to prevent 'busy' state of detached volume
         data = self.client.stop_instances(InstanceIds=[instance_id])
         self.get_instance_waiter().wait_available(instance_id,
                                                   final_set=('stopped'))

         self.client.detach_volume(VolumeId=volume_id)
         self.get_volume_attachment_waiter().wait_delete(volume_id)
-        self.cancelResourceCleanUp(clean_vi)
-
         self.client.delete_volume(VolumeId=volume_id)
         self.cancelResourceCleanUp(clean_v)
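Taken together, the test changes appear to create the volume before running the instance, so cleanups are already registered in an order that lets the volume be deleted after the instance is terminated; this makes the explicit cleanup reordering and the separate detach-volume cleanup unnecessary.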