heat/heat_integrationtests/scenario/test_volumes_delete_snapshot.yaml
Steve Baker 30bc841b09 Add volume backup/restore integration test
Adds a more comprehensive test for the cinder volume resources:
- Creates a stack with a volume, and writes data to it
- Deletes the stack with the volume deletion policy set to
  "snapshot" (which in practice backs up the volume)
- Creates a new stack with a volume created from the backup
- Proves the data written in the first stack is still present
Note this test also aims to provide coverage of volume attachment
resources, e.g. so we would catch any bugs like bug #1311533 in
the future.

Authored-By: Steve Hardy <shardy@redhat.com> based on tempest change
I04ae0cf942d12c4504b2df504a8c940575b90b69

Change-Id: I04ae0cf942d12c4504b2df504a8c940575b90b69
Related-Bug: #1311533
2014-10-08 12:45:34 +13:00
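
The restore half of the scenario lives in a separate template that is not
shown here. A minimal sketch of the volume resource it would need, assuming
the standard OS::Cinder::Volume backup_id property and a hypothetical
backup_id parameter supplied by the test:

parameters:
  backup_id:
    type: string
    description: ID of the backup to restore the volume from

resources:
  volume:
    type: OS::Cinder::Volume
    properties:
      # Creating the volume from a backup restores the data written by the
      # first stack, so the instance can verify it is still present
      backup_id: { get_param: backup_id }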

heat_template_version: 2013-05-23

parameters:
  key_name:
    type: string
    description: keypair to enable SSH access to the instance.
  instance_type:
    type: string
    description: Type of the instance to be created.
    default: m1.small
  image_id:
    type: string
    description: ID of the image to use for the instance to be created.
  timeout:
    type: number
    description: Stack creation timeout
  dev_name:
    type: string
    description: Expected device name for volume
    default: vdb
  test_string:
    type: string
    description: Test string which is written to volume
    default: ateststring
  rescan_timeout:
    type: number
    description: Max number of seconds to wait for volume after rescan
    default: 120
  network:
    type: string
  volume_description:
    type: string
    description: Description of volume
    default: A volume description
  volume_size:
    type: number
    description: Size of volume
    default: 1
resources:
  volume:
    deletion_policy: 'Snapshot'
    type: OS::Cinder::Volume
    properties:
      size: { get_param: volume_size }
      description: { get_param: volume_description }

  volume_attachment:
    type: OS::Cinder::VolumeAttachment
    properties:
      volume_id: { get_resource: volume }
      instance_uuid: { get_resource: instance }

  instance:
    type: OS::Nova::Server
    properties:
      image: { get_param: image_id }
      flavor: { get_param: instance_type }
      key_name: { get_param: key_name }
      networks:
        - uuid: { get_param: network }
      user_data_format: RAW
      user_data:
        str_replace:
          template: |
            #!/bin/sh
            # Trigger rescan to ensure we see the attached volume
            for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
            # Wait for the rescan as the volume doesn't appear immediately
            for i in $(seq 1 rescan_timeout)
            do
              grep -q dev_name /proc/partitions && break
              sleep 1
            done
            if grep -q dev_name /proc/partitions
            then
              mkfs.ext4 /dev/dev_name
              mount /dev/dev_name /mnt
              echo "test_string" > /mnt/testfile
              umount /mnt
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Completed volume configuration.", "UniqueId": "instance1"}' "wc_url"
            else
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
            fi
          params:
            wc_url: { get_resource: wait_handle }
            dev_name: { get_param: dev_name }
            rescan_timeout: { get_param: rescan_timeout }
            test_string: { get_param: test_string }

  wait_handle:
    type: OS::Heat::UpdateWaitConditionHandle

  wait_condition:
    type: AWS::CloudFormation::WaitCondition
    properties:
      Count: 1
      Handle: { get_resource: wait_handle }
      Timeout: { get_param: timeout }
outputs:
  status:
    description: status
    value: { get_attr: ['volume', 'status'] }
  size:
    description: size
    value: { get_attr: ['volume', 'size'] }
  display_description:
    description: display_description
    value: { get_attr: ['volume', 'display_description'] }
  volume_id:
    value: { get_resource: volume }
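
The companion restore template (not shown here) would verify the data on the
instance side in much the same way as the script above, except that it mounts
the restored volume and checks for the test string instead of writing it. A
rough sketch of that user_data fragment, reusing this template's parameter
names as an assumption about the companion file:

          template: |
            #!/bin/sh
            # Rescan and wait for the restored volume, as in the script above
            for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
            for i in $(seq 1 rescan_timeout)
            do
              grep -q dev_name /proc/partitions && break
              sleep 1
            done
            # Mount the restored volume and check that the data written by the
            # first stack survived the backup/restore cycle before signalling
            mount /dev/dev_name /mnt
            if grep -q "test_string" /mnt/testfile
            then
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Restored volume contains expected data.", "UniqueId": "instance1"}' "wc_url"
            else
              curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected test string not found.", "UniqueId": "instance1"}' "wc_url"
            fi
            umount /mnt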