Add volume backup/restore integration test

Adds a more comprehensive test for the cinder volume resources:
- Creates a stack with a volume, and writes data to it
- Deletes the stack with the volume deletion policy set to
  "Snapshot" (which actually creates a cinder backup of the volume)
- Creates a new stack with a volume created from the backup
- Proves the data written in the first stack is still present
Note this test also aims to provide coverage of volume attachment
resources, e.g. so we would catch any bugs like bug #1311533 in
the future.
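
For reference, the mechanism under test is the resource-level deletion
policy; a minimal sketch (resource and property values illustrative):

    resources:
      volume:
        type: OS::Cinder::Volume
        deletion_policy: 'Snapshot'
        properties:
          size: 1

On stack delete, heat then creates a cinder backup of the volume
instead of deleting it.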

Authored-By: Steve Hardy <shardy@redhat.com> based on tempest change
I04ae0cf942d12c4504b2df504a8c940575b90b69

Change-Id: I04ae0cf942d12c4504b2df504a8c940575b90b69
Related-Bug: #1311533
Steve Baker 2014-08-11 14:39:00 +12:00
parent 9cb907848f
commit 30bc841b09
3 changed files with 380 additions and 0 deletions

@@ -0,0 +1,136 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging

from cinderclient import exceptions as cinder_exceptions
import six

from heat_integrationtests.common import test

LOG = logging.getLogger(__name__)


class VolumeBackupRestoreIntegrationTest(test.HeatIntegrationTest):
def setUp(self):
super(VolumeBackupRestoreIntegrationTest, self).setUp()
self.client = self.orchestration_client
if self.conf.keypair_name:
self.keypair_name = self.conf.keypair_name
else:
self.keypair = self.create_keypair()
self.keypair_name = self.keypair.id
self.volume_description = 'A test volume description 123'
self.volume_size = self.conf.volume_size
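
    # Verify the volume directly against cinder, rather than trusting
    # only the heat-reported attributes.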
def _cinder_verify(self, volume_id, expected_status='available'):
self.assertIsNotNone(volume_id)
volume = self.volume_client.volumes.get(volume_id)
self.assertIsNotNone(volume)
self.assertEqual(expected_status, volume.status)
self.assertEqual(self.volume_size, volume.size)
self.assertEqual(self.volume_description,
volume.display_description)

    def _outputs_verify(self, stack, expected_status='available'):
self.assertEqual(expected_status,
self._stack_output(stack, 'status'))
self.assertEqual(six.text_type(self.volume_size),
self._stack_output(stack, 'size'))
self.assertEqual(self.volume_description,
self._stack_output(stack, 'display_description'))

    def _create_stack(self, template_name, add_parameters=None):
        # TODO(shardy): refactor this into a generic base-class helper
        # Guard against the mutable-default-argument pitfall
        add_parameters = add_parameters or {}
net = self._get_default_network()
stack_name = self._stack_rand_name()
template = self._load_template(__file__, template_name)
parameters = {'key_name': self.keypair_name,
'instance_type': self.conf.instance_type,
'image_id': self.conf.minimal_image_ref,
'volume_description': self.volume_description,
'timeout': self.conf.build_timeout,
'network': net['id']}
parameters.update(add_parameters)
ret_stack = self.client.stacks.create(
stack_name=stack_name,
template=template,
parameters=parameters)
stack_id = ret_stack['stack']['id']
stack = self.client.stacks.get(stack_id)
self.assertIsNotNone(stack)
stack_identifier = '%s/%s' % (stack_name, stack.id)
self.addCleanup(self._stack_delete, stack_identifier)
self._wait_for_stack_status(stack_identifier, 'CREATE_COMPLETE')
return stack, stack_identifier

    def test_cinder_volume_create_backup_restore(self):
        """Ensure the 'Snapshot' deletion policy works.

        This requires a more complex test, but it covers several aspects
        of the heat cinder resources:
        1. Create a volume, attach it to an instance, write some data to it
        2. Delete the stack; the 'Snapshot' deletion policy triggers a
           backup of the volume
        3. Check that a volume backup was created
        4. Create a new stack, where the volume is created from the backup
        5. Verify the test data written in (1) is present in the new volume
        """
stack, stack_identifier = self._create_stack(
template_name='test_volumes_delete_snapshot.yaml',
add_parameters={'volume_size': self.volume_size})
# Verify with cinder that the volume exists, with matching details
volume_id = self._stack_output(stack, 'volume_id')
self._cinder_verify(volume_id, expected_status='in-use')
# Verify the stack outputs are as expected
self._outputs_verify(stack, expected_status='in-use')
# Delete the stack and ensure a backup is created for volume_id
# but the volume itself is gone
self.client.stacks.delete(stack_identifier)
self._wait_for_stack_status(stack_identifier, 'DELETE_COMPLETE')
self.assertRaises(cinder_exceptions.NotFound,
self.volume_client.volumes.get,
volume_id)
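        # Exactly one backup should reference the deleted volume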
backups = self.volume_client.backups.list()
self.assertIsNotNone(backups)
backups_filtered = [b for b in backups if b.volume_id == volume_id]
self.assertEqual(1, len(backups_filtered))
backup = backups_filtered[0]
self.addCleanup(self.volume_client.backups.delete, backup.id)
# Now, we create another stack where the volume is created from the
# backup created by the previous stack
stack2, stack_identifier2 = self._create_stack(
template_name='test_volumes_create_from_backup.yaml',
add_parameters={'backup_id': backup.id})
# Verify with cinder that the volume exists, with matching details
volume_id2 = self._stack_output(stack2, 'volume_id')
self._cinder_verify(volume_id2, expected_status='in-use')
# Verify the stack outputs are as expected
self._outputs_verify(stack2, expected_status='in-use')
testfile_data = self._stack_output(stack2, 'testfile_data')
self.assertEqual('{"instance1": "Volume Data:ateststring"}',
testfile_data)
# Delete the stack and ensure the volume is gone
self.client.stacks.delete(stack_identifier2)
self._wait_for_stack_status(stack_identifier2, 'DELETE_COMPLETE')
self.assertRaises(cinder_exceptions.NotFound,
self.volume_client.volumes.get,
volume_id2)

test_volumes_create_from_backup.yaml

@@ -0,0 +1,119 @@
heat_template_version: 2013-05-23
parameters:
key_name:
type: string
description: keypair to enable SSH access to the instance.
instance_type:
type: string
description: Type of the instance to be created.
default: m1.small
image_id:
type: string
description: ID of the image to use for the instance to be created.
timeout:
type: number
description: Stack creation timeout
dev_name:
type: string
description: Expected device name for volume
default: vdb
rescan_timeout:
type: number
description: Max number of seconds to wait for volume after rescan
default: 120
backup_id:
type: string
description: backup_id to create volume from
network:
type: string
volume_description:
type: string
description: Description of volume
default: A volume description
resources:
volume:
type: OS::Cinder::Volume
properties:
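      # Create the volume from the cinder backup taken when the first
      # stack was deleted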
backup_id: { get_param: backup_id }
description: { get_param: volume_description }
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: { get_resource: volume }
instance_uuid: { get_resource: instance }
instance:
type: OS::Nova::Server
properties:
image: { get_param: image_id }
flavor: { get_param: instance_type }
key_name: { get_param: key_name }
networks:
- uuid: {get_param: network}
user_data_format: RAW
user_data:
str_replace:
template: |
#!/bin/sh
# Trigger rescan to ensure we see the attached volume
for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
# Wait for the rescan as the volume doesn't appear immediately
for i in $(seq 1 rescan_timeout)
do
grep -q dev_name /proc/partitions && break
sleep 1
done
if grep -q dev_name /proc/partitions
then
mount /dev/dev_name /mnt
TESTDATA=$(cat /mnt/testfile)
curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Volume Data:'$TESTDATA'", "UniqueId": "instance1"}' "wc_url"
else
curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
fi
params:
wc_url: { get_resource: wait_handle }
dev_name: { get_param: dev_name }
rescan_timeout: { get_param: rescan_timeout }
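  # The instance signals success or failure (and the test data) back to
  # heat via the handle URL substituted for wc_url in the user_data above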
wait_handle:
type: OS::Heat::UpdateWaitConditionHandle
wait_condition:
type: AWS::CloudFormation::WaitCondition
properties:
Count: 1
Handle: { get_resource: wait_handle }
Timeout: { get_param: timeout }
outputs:
status:
description: status
value: { get_attr: ['volume', 'status'] }
size:
description: size
value: { get_attr: ['volume', 'size'] }
display_description:
description: display_description
value: { get_attr: ['volume', 'display_description'] }
volume_id:
value: { get_resource: volume }
testfile_data:
description: Contents of /mnt/testfile from the mounted volume
value: { get_attr: ['wait_condition', 'Data'] }

test_volumes_delete_snapshot.yaml

@@ -0,0 +1,125 @@
heat_template_version: 2013-05-23
parameters:
key_name:
type: string
description: keypair to enable SSH access to the instance.
instance_type:
type: string
description: Type of the instance to be created.
default: m1.small
image_id:
type: string
description: ID of the image to use for the instance to be created.
timeout:
type: number
description: Stack creation timeout
dev_name:
type: string
description: Expected device name for volume
default: vdb
test_string:
type: string
description: Test string which is written to volume
default: ateststring
rescan_timeout:
type: number
description: Max number of seconds to wait for volume after rescan
default: 120
network:
type: string
volume_description:
type: string
description: Description of volume
default: A volume description
volume_size:
type: number
description: Size of volume
default: 1
resources:
volume:
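    # 'Snapshot' makes heat create a cinder backup of this volume on
    # stack delete, instead of deleting it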
deletion_policy: 'Snapshot'
type: OS::Cinder::Volume
properties:
size: {get_param: volume_size}
description: {get_param: volume_description}
volume_attachment:
type: OS::Cinder::VolumeAttachment
properties:
volume_id: { get_resource: volume }
instance_uuid: { get_resource: instance }
instance:
type: OS::Nova::Server
properties:
image: { get_param: image_id }
flavor: { get_param: instance_type }
key_name: { get_param: key_name }
networks:
- uuid: {get_param: network}
user_data_format: RAW
user_data:
str_replace:
template: |
#!/bin/sh
# Trigger rescan to ensure we see the attached volume
for i in /sys/class/scsi_host/*; do echo "- - -" > $i/scan; done
# Wait for the rescan as the volume doesn't appear immediately
for i in $(seq 1 rescan_timeout)
do
grep -q dev_name /proc/partitions && break
sleep 1
done
if grep -q dev_name /proc/partitions
then
mkfs.ext4 /dev/dev_name
mount /dev/dev_name /mnt
echo "test_string" > /mnt/testfile
umount /mnt
curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "SUCCESS", "Reason": "Test Complete", "Data": "Completed volume configuration.", "UniqueId": "instance1"}' "wc_url"
else
curl -X PUT -H 'Content-Type:' --data-binary '{"Status": "FAILURE", "Reason": "Test Failed", "Data": "Expected device dev_name not found.", "UniqueId": "instance1"}' "wc_url"
fi
params:
wc_url: { get_resource: wait_handle }
dev_name: { get_param: dev_name }
rescan_timeout: { get_param: rescan_timeout }
test_string: { get_param: test_string }
wait_handle:
type: OS::Heat::UpdateWaitConditionHandle
wait_condition:
type: AWS::CloudFormation::WaitCondition
properties:
Count: 1
Handle: { get_resource: wait_handle }
Timeout: { get_param: timeout }
outputs:
status:
description: status
value: { get_attr: ['volume', 'status'] }
size:
description: size
value: { get_attr: ['volume', 'size'] }
display_description:
description: display_description
value: { get_attr: ['volume', 'display_description'] }
volume_id:
value: { get_resource: volume }