From 45c98a22011d39badb2f5fbccf003dfd2a531da2 Mon Sep 17 00:00:00 2001
From: chenying
Date: Wed, 12 Apr 2017 19:52:36 +0800
Subject: [PATCH] Add a fullstack test protecting a server which boots from a
 volume

A server can boot from an image or from a bootable volume. This patch
adds a fullstack test covering the boot-from-volume case.

Change-Id: If12443bb05b99e631406800e250e0804710a6e54
Closes-Bug: #1681879
---
 karbor/tests/fullstack/karbor_objects.py   | 41 ++++++++++++++++------
 karbor/tests/fullstack/test_checkpoints.py | 32 +++++++++++++++++
 2 files changed, 63 insertions(+), 10 deletions(-)

diff --git a/karbor/tests/fullstack/karbor_objects.py b/karbor/tests/fullstack/karbor_objects.py
index b9023de2..6f382f2c 100644
--- a/karbor/tests/fullstack/karbor_objects.py
+++ b/karbor/tests/fullstack/karbor_objects.py
@@ -219,15 +219,24 @@ class Server(object):
             "name": self._name,
         }
 
-    def create(self, name=None, image=None, flavor=DEFAULT_FLAVOR,
+    def create(self, name=None, image=None, volume=None, flavor=DEFAULT_FLAVOR,
                network=DEFAULT_NETWORK, timeout=LONG_TIMEOUT):
-        if not image:
-            images = self.glance_client.images.list()
-            for image_iter in images:
-                if image_iter['disk_format'] not in ('aki', 'ari'):
-                    image = image_iter['id']
-                    break
-            assert image
+        block_device_mapping_v2 = None
+        if volume:
+            block_device_mapping_v2 = [{
+                'uuid': volume,
+                'source_type': 'volume',
+                'destination_type': 'volume',
+                'boot_index': 0,
+                'delete_on_termination': False}]
+        else:
+            if not image:
+                images = self.glance_client.images.list()
+                for image_iter in images:
+                    if image_iter['disk_format'] not in ('aki', 'ari'):
+                        image = image_iter['id']
+                        break
+                assert image
         flavor = self.nova_client.flavors.find(name=flavor)
         if name is None:
             name = "KarborFullstack-Server-{id}".format(
@@ -243,6 +252,7 @@ class Server(object):
         server = self.nova_client.servers.create(
             name=name,
             image=image,
+            block_device_mapping_v2=block_device_mapping_v2,
             flavor=flavor,
             nics=[{"net-id": network_id}],
         )
@@ -296,6 +306,7 @@ class Volume(object):
         self.id = None
         self._name = None
         self.cinder_client = base._get_cinder_client()
+        self.glance_client = base._get_glance_client()
 
     def _volume_status(self, status=None):
         try:
@@ -316,7 +327,8 @@ class Volume(object):
             "extra_info": {'availability_zone': 'az1'},
         }
 
-    def create(self, size, name=None, timeout=LONG_TIMEOUT):
+    def create(self, size, name=None, create_from_image=False,
+               timeout=LONG_TIMEOUT):
         if name is None:
             name = "KarborFullstack-Volume-{id}".format(
                 id=self.__class__._name_id
@@ -324,7 +336,16 @@ class Volume(object):
         self.__class__._name_id += 1
         self._name = name
 
-        volume = self.cinder_client.volumes.create(size, name=name)
+        image = None
+        if create_from_image:
+            images = self.glance_client.images.list()
+            for image_iter in images:
+                if image_iter['disk_format'] not in ('aki', 'ari'):
+                    image = image_iter['id']
+                    break
+            assert image
+        volume = self.cinder_client.volumes.create(size, name=name,
+                                                   imageRef=image)
         self.id = volume.id
         utils.wait_until_true(partial(self._volume_status, 'available'),
                               timeout=timeout, sleep=MEDIUM_SLEEP)
diff --git a/karbor/tests/fullstack/test_checkpoints.py b/karbor/tests/fullstack/test_checkpoints.py
index e5d4732e..2c3cb14c 100644
--- a/karbor/tests/fullstack/test_checkpoints.py
+++ b/karbor/tests/fullstack/test_checkpoints.py
@@ -132,3 +132,35 @@ class CheckpointsTest(karbor_base.KarborBaseTest):
         backups = self.cinder_client.backups.list(search_opts=search_opts)
         self.assertEqual(1, len(backups))
         server.detach_volume(volume.id)
+
+    def test_server_boot_from_volume_with_attached_volume(self):
+        """Test checkpoint for a server booted from a volume
+
+        Test checkpoint for a server which has booted from a bootable
+        volume and has an additional data volume attached.
+        """
+        bootable_volume = self.store(objects.Volume())
+        bootable_volume_id = bootable_volume.create(1, create_from_image=True)
+        volume = self.store(objects.Volume())
+        volume.create(1)
+        server = self.store(objects.Server())
+        server.create(volume=bootable_volume_id)
+        server.attach_volume(volume.id)
+
+        plan = self.store(objects.Plan())
+        plan.create(self.provider_id, [server, ])
+
+        checkpoint = self.store(objects.Checkpoint())
+        checkpoint.create(self.provider_id, plan.id, timeout=2400)
+
+        items = self.karbor_client.checkpoints.list(self.provider_id)
+        ids = [item.id for item in items]
+        self.assertTrue(checkpoint.id in ids)
+        search_opts = {"volume_id": volume.id}
+        backups = self.cinder_client.backups.list(search_opts=search_opts)
+        self.assertEqual(1, len(backups))
+        search_opts = {"volume_id": bootable_volume_id}
+        bootable_backups = self.cinder_client.backups.list(
+            search_opts=search_opts)
+        self.assertEqual(1, len(bootable_backups))
+        server.detach_volume(volume.id)
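
For context (not part of the patch): a minimal standalone sketch of the boot-from-volume request that Server.create() now issues through python-novaclient. The authenticated session and the volume_id, flavor_id, and net_id values are assumed placeholders for illustration only.

# Sketch, assuming an authenticated keystoneauth1 session plus existing
# volume_id, flavor_id and net_id values: boot a Nova server from a
# bootable volume by passing a block_device_mapping_v2 entry.
from novaclient import client as nova_client


def boot_server_from_volume(session, volume_id, flavor_id, net_id,
                            name="boot-from-volume-demo"):
    nova = nova_client.Client("2.1", session=session)
    bdm_v2 = [{
        'uuid': volume_id,               # existing bootable Cinder volume
        'source_type': 'volume',         # boot source is a volume
        'destination_type': 'volume',    # keep it attached as a volume
        'boot_index': 0,                 # use it as the root/boot device
        'delete_on_termination': False,  # keep the volume after server delete
    }]
    # image is None because the root disk comes from the volume, matching
    # how Server.create() leaves image unset when a volume is given.
    return nova.servers.create(name=name, image=None, flavor=flavor_id,
                               block_device_mapping_v2=bdm_v2,
                               nics=[{'net-id': net_id}])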