diff --git a/karbor/services/protection/flows/protect.py b/karbor/services/protection/flows/protect.py index 94d10661..9e1164bb 100644 --- a/karbor/services/protection/flows/protect.py +++ b/karbor/services/protection/flows/protect.py @@ -12,7 +12,6 @@ from karbor.common import constants from karbor.resource import Resource -from karbor.services.protection.protectable_registry import ProtectableRegistry from karbor.services.protection import resource_flow from oslo_log import log as logging from taskflow import task @@ -45,8 +44,8 @@ class CompleteProtectTask(task.Task): checkpoint.commit() -def get_flow(context, workflow_engine, plan, provider, checkpoint): - protectable_registry = ProtectableRegistry() +def get_flow(context, protectable_registry, workflow_engine, plan, provider, + checkpoint): resources = set(Resource(**item) for item in plan.get("resources")) resource_graph = protectable_registry.build_graph(context, resources) diff --git a/karbor/services/protection/flows/worker.py b/karbor/services/protection/flows/worker.py index 4f3a0ca8..7723009a 100644 --- a/karbor/services/protection/flows/worker.py +++ b/karbor/services/protection/flows/worker.py @@ -53,8 +53,10 @@ class Worker(object): **kwargs): if operation_type == constants.OPERATION_PROTECT: plan = kwargs.get('plan', None) + protectable_registry = kwargs.get('protectable_registry', None) flow = flow_protect.get_flow( context, + protectable_registry, self.workflow_engine, plan, provider, diff --git a/karbor/services/protection/manager.py b/karbor/services/protection/manager.py index 6771b417..ea7743b2 100644 --- a/karbor/services/protection/manager.py +++ b/karbor/services/protection/manager.py @@ -117,6 +117,7 @@ class ProtectionManager(manager.Manager): try: flow = self.worker.get_flow( context=context, + protectable_registry=self.protectable_registry, operation_type=constants.OPERATION_PROTECT, plan=plan, provider=provider, diff --git a/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py b/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py index 6d3ad6a8..6a65661a 100644 --- a/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py +++ b/karbor/services/protection/protection_plugins/server/nova_protection_plugin.py @@ -10,17 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
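Note on the hunks above: instead of building a fresh ProtectableRegistry inside the protect flow, the patch threads the registry that ProtectionManager already owns through Worker.get_flow and into flow_protect.get_flow as an explicit argument. A minimal sketch of that call chain, assuming simplified stand-ins (the FakeRegistry stub and the plain plan dict below are illustrative only, not karbor code):

    # Sketch of the new parameter threading; names mirror the patch, the
    # stub registry and the plan dict are hypothetical stand-ins.
    def get_flow(context, protectable_registry, workflow_engine, plan,
                 provider, checkpoint):
        # The flow now uses the registry handed down by its caller instead
        # of instantiating ProtectableRegistry() itself.
        resources = set(plan.get("resources"))
        return protectable_registry.build_graph(context, resources)

    class Worker(object):
        def get_flow(self, context, operation_type, provider, **kwargs):
            # For OPERATION_PROTECT the manager now passes
            # 'protectable_registry' through kwargs.
            return get_flow(context,
                            kwargs.get('protectable_registry'),
                            None,  # workflow_engine, elided in this sketch
                            kwargs.get('plan'),
                            provider,
                            kwargs.get('checkpoint'))

    class FakeRegistry(object):
        def build_graph(self, context, resources):
            return sorted(resources)  # stand-in for the real graph build

    graph = Worker().get_flow(context=None, operation_type='protect',
                              provider=None, plan={"resources": ["vm-1"]},
                              protectable_registry=FakeRegistry(),
                              checkpoint=None)

The rest of the diff rewrites the Nova protection plugin itself against the new protection_plugin.Operation interface.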
-import eventlet -from io import BytesIO -import os -from time import sleep - from karbor.common import constants from karbor import exception from karbor.i18n import _LE, _LI from karbor.services.protection.client_factory import ClientFactory -from karbor.services.protection.protection_plugins.base_protection_plugin \ - import BaseProtectionPlugin +from karbor.services.protection import protection_plugin from karbor.services.protection.protection_plugins.server \ import server_plugin_schemas from karbor.services.protection.restore_heat import HeatResource @@ -28,95 +22,60 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils -protection_opts = [ - cfg.IntOpt('backup_image_object_size', - default=52428800, - help='The size in bytes of instance image objects') -] CONF = cfg.CONF -CONF.register_opts(protection_opts) LOG = logging.getLogger(__name__) VOLUME_ATTACHMENT_RESOURCE = 'OS::Cinder::VolumeAttachment' FLOATING_IP_ASSOCIATION = 'OS::Nova::FloatingIPAssociation' -class NovaProtectionPlugin(BaseProtectionPlugin): - _SUPPORT_RESOURCE_TYPES = [constants.SERVER_RESOURCE_TYPE] - - def __init__(self, config=None): - super(NovaProtectionPlugin, self).__init__(config) - self._tp = eventlet.GreenPool() - self.image_object_size = CONF.backup_image_object_size - - def _add_to_threadpool(self, func, *args, **kwargs): - self._tp.spawn_n(func, *args, **kwargs) - - @classmethod - def get_options_schema(cls, resource_type): - return server_plugin_schemas.OPTIONS_SCHEMA - - @classmethod - def get_restore_schema(cls, resource_type): - return server_plugin_schemas.RESTORE_SCHEMA - - @classmethod - def get_saved_info_schema(cls, resource_type): - return server_plugin_schemas.SAVED_INFO_SCHEMA - - @classmethod - def get_saved_info(cls, metadata_store, resource): - # TODO(luobin) - pass - - def _glance_client(self, cntxt): - return ClientFactory.create_client("glance", cntxt) - - def _nova_client(self, cntxt): - return ClientFactory.create_client("nova", cntxt) - - def _cinder_client(self, cntxt): - return ClientFactory.create_client("cinder", cntxt) - - def _neutron_client(self, cntxt): - return ClientFactory.create_client("neutron", cntxt) - - def create_backup(self, cntxt, checkpoint, **kwargs): - resource_node = kwargs.get("node") - server_id = resource_node.value.id +class ProtectOperation(protection_plugin.Operation): + def __init__(self): + super(ProtectOperation, self).__init__() + def on_main(self, checkpoint, resource, context, parameters, **kwargs): + server_id = resource.id bank_section = checkpoint.get_resource_bank_section(server_id) - nova_client = self._nova_client(cntxt) - glance_client = self._glance_client(cntxt) - cinder_client = self._cinder_client(cntxt) - neutron_client = self._neutron_client(cntxt) + nova_client = ClientFactory.create_client("nova", context) + cinder_client = ClientFactory.create_client("cinder", context) + neutron_client = ClientFactory.create_client("neutron", context) resource_definition = {"resource_id": server_id} - child_nodes = resource_node.child_nodes - attach_metadata = {} - LOG.info(_LI("creating server backup, server_id: %s."), server_id) + # get dependent resources + server_child_nodes = [] + resources = checkpoint.resource_graph + for resource_node in resources: + resource = resource_node.value + if resource.id == server_id: + server_child_nodes = resource_node.child_nodes + LOG.info(_LI("Creating server backup, server_id: %s. 
"), server_id) try: bank_section.update_object("status", constants.RESOURCE_STATUS_PROTECTING) - for child_node in child_nodes: - child_resource = child_node.value + # get attach_metadata about volume + attach_metadata = {} + for server_child_node in server_child_nodes: + child_resource = server_child_node.value if child_resource.type == constants.VOLUME_RESOURCE_TYPE: volume = cinder_client.volumes.get(child_resource.id) attachments = getattr(volume, "attachments") for attachment in attachments: if attachment["server_id"] == server_id: - attach_metadata[child_resource.id] = attachment[ - "device"] + attachment["bootable"] = getattr( + volume, "bootable") + attach_metadata[child_resource.id] = attachment resource_definition["attach_metadata"] = attach_metadata + # get metadata about AZ server = nova_client.servers.get(server_id) availability_zone = getattr(server, "OS-EXT-AZ:availability_zone") + # get metadata about network, flavor, key_name, security_groups addresses = getattr(server, "addresses") networks = [] floating_ips = [] @@ -132,177 +91,112 @@ class NovaProtectionPlugin(BaseProtectionPlugin): networks.append(port["network_id"]) elif network_type == "floating": floating_ips.append(addr) - flavor = getattr(server, "flavor")["id"] key_name = getattr(server, "key_name", None) security_groups = getattr(server, "security_groups", None) + # get metadata about boot device + boot_metadata = {} + image_info = getattr(server, "image", None) + if image_info is not None and isinstance(image_info, dict): + boot_metadata["boot_device_type"] = "image" + boot_metadata["boot_image_id"] = image_info['id'] + else: + boot_metadata["boot_device_type"] = "volume" + volumes_attached = getattr( + server, "os-extended-volumes:volumes_attached", []) + for volume_attached in volumes_attached: + volume_id = volume_attached["id"] + volume_attach_metadata = attach_metadata.get( + volume_id, None) + if volume_attach_metadata is not None and ( + volume_attach_metadata["bootable"] == "true"): + boot_metadata["boot_volume_id"] = volume_id + boot_metadata["boot_attach_metadata"] = ( + volume_attach_metadata) + resource_definition["boot_metadata"] = boot_metadata + + # save all server's metadata server_metadata = {"availability_zone": availability_zone, "networks": networks, "floating_ips": floating_ips, "flavor": flavor, "key_name": key_name, - "security_groups": security_groups + "security_groups": security_groups, } resource_definition["server_metadata"] = server_metadata - - snapshot_id = nova_client.servers.create_image( - server_id, "snapshot_%s" % server_id) - + LOG.info("Creating server backup, resource_definition: %s.", + resource_definition) bank_section.update_object("metadata", resource_definition) - except Exception as err: - # update resource_definition backup_status - LOG.error(_LE("create backup failed, server_id: %s."), server_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.CreateBackupFailed( - reason=err, - resource_id=server_id, - resource_type=constants.SERVER_RESOURCE_TYPE) - - self._add_to_threadpool(self._create_backup, glance_client, - bank_section, server_id, snapshot_id, - resource_definition, checkpoint) - - def _create_backup(self, glance_client, bank_section, server_id, - snapshot_id, resource_definition, checkpoint): - try: - image = glance_client.images.get(snapshot_id) - # TODO(luobin): config retry_attempts - retry_attempts = 10 - while image.status == "queued" and retry_attempts != 0: - sleep(60) - image = 
glance_client.images.get(snapshot_id) - retry_attempts -= 1 - if retry_attempts == 0: - raise Exception - - resource_definition["snapshot_id"] = snapshot_id - snapshot_metadata = { - "disk_format": image.disk_format, - "container_format": image.container_format, - "name": "snapshot_%s@%s" % (checkpoint.id, server_id) - } - - if getattr(image, "kernel_id", None) is not None: - kernel = glance_client.images.get(image.kernel_id) - kernel_metadata = { - "disk_format": kernel.disk_format, - "container_format": kernel.container_format, - "name": "kernel_%s@%s" % (checkpoint.id, server_id) - } - snapshot_metadata["kernel_metadata"] = kernel_metadata - - if getattr(image, "ramdisk_id", None) is not None: - ramdisk = glance_client.images.get(image.ramdisk_id) - ramdisk_metadata = { - "disk_format": ramdisk.disk_format, - "container_format": ramdisk.container_format, - "name": "ramdisk_%s@%s" % (checkpoint.id, server_id) - } - snapshot_metadata["ramdisk_metadata"] = ramdisk_metadata - - resource_definition["snapshot_metadata"] = snapshot_metadata - # write resource_definition in bank - bank_section.update_object("metadata", resource_definition) - - image = glance_client.images.get(snapshot_id) - # TODO(luobin): config retry_attempts - retry_attempts = 10 - while image.status != "active" and retry_attempts != 0: - sleep(60) - image = glance_client.images.get(snapshot_id) - retry_attempts -= 1 - if retry_attempts == 0: - raise Exception - - # store kernel_data if need - if getattr(image, "kernel_id", None) is not None: - kernel_id = image.kernel_id - kernel_response = glance_client.images.data(kernel_id) - kernel_response_data = BytesIO() - for chunk in kernel_response: - kernel_response_data.write(chunk) - kernel_response_data.seek(0, os.SEEK_SET) - - chunks = 0 - while True: - data = kernel_response_data.read(self.image_object_size) - if data == '': - break - bank_section.update_object("kernel_" + str(chunks), data) - chunks += 1 - - # store ramdisk_data if need - if getattr(image, "ramdisk_id", None) is not None: - ramdisk_id = image.ramdisk_id - ramdisk_response = glance_client.images.data(ramdisk_id) - ramdisk_response_data = BytesIO() - for chunk in ramdisk_response: - ramdisk_response_data.write(chunk) - ramdisk_response_data.seek(0, os.SEEK_SET) - - chunks = 0 - while True: - data = ramdisk_response_data.read(self.image_object_size) - if data == '': - break - bank_section.update_object("ramdisk_" + str(chunks), data) - chunks += 1 - - # store snapshot_data - image_response = glance_client.images.data(snapshot_id) - image_response_data = BytesIO() - for chunk in image_response: - image_response_data.write(chunk) - image_response_data.seek(0, os.SEEK_SET) - - chunks = 0 - while True: - data = image_response_data.read(self.image_object_size) - if data == '': - break - bank_section.update_object("snapshot_" + str(chunks), data) - chunks += 1 - - glance_client.images.delete(snapshot_id) # update resource_definition backup_status bank_section.update_object("status", constants.RESOURCE_STATUS_AVAILABLE) - LOG.info(_LI("finish backup server, server_id: %s."), server_id) + LOG.info(_LI("Finish backup server, server_id: %s."), server_id) except Exception as err: - LOG.error(_LE("create backup failed, server_id: %s."), server_id) + # update resource_definition backup_status + LOG.exception(_LE("Create backup failed, server_id: %s."), + server_id) bank_section.update_object("status", constants.RESOURCE_STATUS_ERROR) - raise exception.CreateBackupFailed( reason=err, resource_id=server_id, 
resource_type=constants.SERVER_RESOURCE_TYPE) - def restore_backup(self, cntxt, checkpoint, **kwargs): - resource_node = kwargs.get("node") - original_server_id = resource_node.value.id + +class DeleteOperation(protection_plugin.Operation): + def __init__(self): + super(DeleteOperation, self).__init__() + + def on_main(self, checkpoint, resource, context, parameters, **kwargs): + resource_id = resource.id + bank_section = checkpoint.get_resource_bank_section(resource_id) + + LOG.info(_LI("deleting server backup, server_id: %s."), resource_id) + + try: + bank_section.update_object("status", + constants.RESOURCE_STATUS_DELETING) + objects = bank_section.list_objects() + for obj in objects: + if obj == "status": + continue + bank_section.delete_object(obj) + bank_section.update_object("status", + constants.RESOURCE_STATUS_DELETED) + LOG.info(_LI("finish delete server, server_id: %s."), resource_id) + except Exception as err: + # update resource_definition backup_status + LOG.error(_LE("Delete backup failed, server_id: %s."), resource_id) + bank_section.update_object("status", + constants.RESOURCE_STATUS_ERROR) + raise exception.DeleteBackupFailed( + reason=err, + resource_id=resource_id, + resource_type=constants.SERVER_RESOURCE_TYPE) + + +class RestoreOperation(protection_plugin.Operation): + def __init__(self): + super(RestoreOperation, self).__init__() + + def on_complete(self, checkpoint, resource, context, parameters, **kwargs): + original_server_id = resource.id heat_template = kwargs.get("heat_template") - restore_name = kwargs.get("restore_name", "karbor-restore-server") + restore_name = parameters.get("restore_name", "karbor-restore-server") - LOG.info(_LI("restoring server backup, server_id: %s."), + LOG.info(_LI("Restoring server backup, server_id: %s."), original_server_id) bank_section = checkpoint.get_resource_bank_section(original_server_id) try: resource_definition = bank_section.get_object("metadata") - # restore server snapshot - image_id = self._restore_server_snapshot( - bank_section, checkpoint, cntxt, - original_server_id, resource_definition) - # restore server instance self._heat_restore_server_instance( - heat_template, image_id, original_server_id, + heat_template, original_server_id, restore_name, resource_definition) # restore volume attachment @@ -312,93 +206,55 @@ class NovaProtectionPlugin(BaseProtectionPlugin): # restore floating ip association self._heat_restore_floating_association( heat_template, original_server_id, resource_definition) - + LOG.debug("Restoring server backup, heat_template: %s.", + heat_template) + LOG.info(_LI("Finish restore server, server_id: %s."), + original_server_id) except Exception as e: - LOG.error(_LE("restore server backup failed, server_id: %s."), - original_server_id) + LOG.exception(_LE("restore server backup failed, server_id: %s."), + original_server_id) raise exception.RestoreBackupFailed( reason=e, resource_id=original_server_id, resource_type=constants.SERVER_RESOURCE_TYPE ) - def _restore_server_snapshot(self, bank_section, checkpoint, cntxt, - original_id, resource_definition): - snapshot_metadata = resource_definition["snapshot_metadata"] - - glance_client = self._glance_client(cntxt) - objects = [key.split("/")[-1] for key in - bank_section.list_objects()] - - # restore kernel if needed - kernel_id = None - if snapshot_metadata.get("kernel_metadata") is not None: - kernel_id = self._restore_image( - bank_section, checkpoint, glance_client, "kernel", - snapshot_metadata["kernel_metadata"], objects, - original_id) - - # 
restore ramdisk if needed - ramdisk_id = None - if snapshot_metadata.get("ramdisk_metadata") is not None: - ramdisk_id = self._restore_image( - bank_section, checkpoint, glance_client, "ramdisk", - snapshot_metadata["ramdisk_metadata"], objects, - original_id) - - # restore image - image_id = self._restore_image( - bank_section, checkpoint, glance_client, "snapshot", - snapshot_metadata, objects, original_id, - kernel_id=kernel_id, ramdisk_id=ramdisk_id) - - image_info = glance_client.images.get(image_id) - retry_attempts = 10 - while image_info.status != "active" and retry_attempts != 0: - sleep(60) - image_info = glance_client.images.get(image_id) - retry_attempts -= 1 - if retry_attempts == 0: - raise Exception - return image_id - - def _restore_image(self, bank_section, checkpoint, glance_client, - image_format, image_metadata, objects, original_id, - **kwargs): - if image_metadata.get("name") is None: - name = "%s_%s@%s" % (image_format, checkpoint.id, - original_id) - else: - name = image_metadata["name"] - disk_format = image_metadata["disk_format"] - container_format = image_metadata["container_format"] - image_data = BytesIO() - for obj in objects: - if obj.find("%s_" % image_format) == 0: - data = bank_section.get_object(obj) - image_data.write(data) - image_data.seek(0, os.SEEK_SET) - image = glance_client.images.create( - disk_format=disk_format, - container_format=container_format, - name=name, - kernel_id=kwargs.get("kernel_id"), - ramdisk_id=kwargs.get("ramdisk_id")) - image_id = image.id - glance_client.images.upload(image_id, image_data) - return image_id - - def _heat_restore_server_instance(self, heat_template, image_id, + def _heat_restore_server_instance(self, heat_template, original_id, restore_name, resource_definition): server_metadata = resource_definition["server_metadata"] properties = { "availability_zone": server_metadata["availability_zone"], "flavor": server_metadata["flavor"], - "image": image_id, "name": restore_name, } + # server boot device + boot_metadata = resource_definition["boot_metadata"] + boot_device_type = boot_metadata["boot_device_type"] + if boot_device_type == "image": + original_image_id = boot_metadata["boot_image_id"] + image_id = heat_template.get_resource_reference( + original_image_id) + properties["image"] = image_id + elif boot_device_type == "volume": + original_volume_id = boot_metadata["boot_volume_id"] + volume_id = heat_template.get_resource_reference( + original_volume_id) + properties["block_device_mapping_v2"] = [{ + "volume_id": volume_id, + "delete_on_termination": False, + "boot_index": 0, + }] + else: + LOG.exception(_LE("Restore server backup failed, server_id: %s."), + original_id) + raise exception.RestoreBackupFailed( + reason="Can not find the boot device of the server.", + resource_id=original_id, + resource_type=constants.SERVER_RESOURCE_TYPE + ) + # server key_name, security_groups, networks if server_metadata["key_name"] is not None: properties["key_name"] = server_metadata["key_name"] @@ -426,24 +282,25 @@ class NovaProtectionPlugin(BaseProtectionPlugin): original_server_id, resource_definition): attach_metadata = resource_definition["attach_metadata"] - for original_volume_id, device in attach_metadata.items(): - instance_uuid = heat_template.get_resource_reference( - original_server_id) - volume_id = heat_template.get_resource_reference( - original_volume_id) - properties = {"mountpoint": device, - "instance_uuid": instance_uuid, - "volume_id": volume_id} - - heat_resource_id = uuidutils.generate_uuid() - 
heat_attachment_resource = HeatResource( - heat_resource_id, - VOLUME_ATTACHMENT_RESOURCE) - for key, value in properties.items(): - heat_attachment_resource.set_property(key, value) - heat_template.put_resource( - "%s_%s" % (original_server_id, original_volume_id), - heat_attachment_resource) + for original_id, attach_metadata_item in attach_metadata.items(): + device = attach_metadata_item.get("device", None) + if attach_metadata_item.get("bootable", None) != "true": + instance_uuid = heat_template.get_resource_reference( + original_server_id) + volume_id = heat_template.get_resource_reference( + original_id) + properties = {"mountpoint": device, + "instance_uuid": instance_uuid, + "volume_id": volume_id} + heat_resource_id = uuidutils.generate_uuid() + heat_attachment_resource = HeatResource( + heat_resource_id, + VOLUME_ATTACHMENT_RESOURCE) + for key, value in properties.items(): + heat_attachment_resource.set_property(key, value) + heat_template.put_resource( + "%s_%s" % (original_server_id, original_id), + heat_attachment_resource) def _heat_restore_floating_association(self, heat_template, original_server_id, @@ -464,33 +321,38 @@ class NovaProtectionPlugin(BaseProtectionPlugin): "%s_%s" % (original_server_id, floating_ip), heat_floating_resource) - def delete_backup(self, cntxt, checkpoint, **kwargs): - resource_node = kwargs.get("node") - resource_id = resource_node.value.id - bank_section = checkpoint.get_resource_bank_section(resource_id) - LOG.info(_LI("deleting server backup, server_id: %s."), resource_id) +class NovaProtectionPlugin(protection_plugin.ProtectionPlugin): + _SUPPORT_RESOURCE_TYPES = [constants.SERVER_RESOURCE_TYPE] - try: - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETING) - objects = bank_section.list_objects() - for obj in objects: - if obj == "status": - continue - bank_section.delete_object(obj) - bank_section.update_object("status", - constants.RESOURCE_STATUS_DELETED) - except Exception as err: - # update resource_definition backup_status - LOG.error(_LE("delete backup failed, server_id: %s."), resource_id) - bank_section.update_object("status", - constants.RESOURCE_STATUS_ERROR) - raise exception.DeleteBackupFailed( - reason=err, - resource_id=resource_id, - resource_type=constants.SERVER_RESOURCE_TYPE) + def __init__(self, config=None): + super(NovaProtectionPlugin, self).__init__(config) @classmethod def get_supported_resources_types(cls): return cls._SUPPORT_RESOURCE_TYPES + + @classmethod + def get_options_schema(cls, resource_type): + return server_plugin_schemas.OPTIONS_SCHEMA + + @classmethod + def get_restore_schema(cls, resource_type): + return server_plugin_schemas.RESTORE_SCHEMA + + @classmethod + def get_saved_info_schema(cls, resource_type): + return server_plugin_schemas.SAVED_INFO_SCHEMA + + @classmethod + def get_saved_info(cls, metadata_store, resource): + pass + + def get_protect_operation(self, resource): + return ProtectOperation() + + def get_restore_operation(self, resource): + return RestoreOperation() + + def get_delete_operation(self, resource): + return DeleteOperation() diff --git a/karbor/tests/fullstack/test_checkpoints.py b/karbor/tests/fullstack/test_checkpoints.py index 1c829eed..9a114a75 100644 --- a/karbor/tests/fullstack/test_checkpoints.py +++ b/karbor/tests/fullstack/test_checkpoints.py @@ -94,7 +94,6 @@ class CheckpointsTest(karbor_base.KarborBaseTest): def test_checkpoint_for_server_attached_volume(self): """Test checkpoint for server which has attached some volumes""" - 
self.skipTest('Requires server protection plugin adjustment') volume = self.store(objects.Volume()) volume.create(1) server = self.store(objects.Server()) diff --git a/karbor/tests/unit/protection/test_nova_protection_plugin.py b/karbor/tests/unit/protection/test_nova_protection_plugin.py index 93e77804..aea56bf6 100644 --- a/karbor/tests/unit/protection/test_nova_protection_plugin.py +++ b/karbor/tests/unit/protection/test_nova_protection_plugin.py @@ -11,16 +11,18 @@ # under the License. import collections + +from collections import namedtuple from karbor.common import constants from karbor.context import RequestContext from karbor.resource import Resource from karbor.services.protection.bank_plugin import Bank from karbor.services.protection.bank_plugin import BankPlugin from karbor.services.protection.bank_plugin import BankSection -from karbor.services.protection.protection_plugins.server. \ - nova_protection_plugin import NovaProtectionPlugin from karbor.services.protection.protection_plugins.server \ import server_plugin_schemas +from karbor.services.protection.protection_plugins.server. \ + nova_protection_plugin import NovaProtectionPlugin from karbor.tests import base import mock @@ -37,11 +39,14 @@ class Server(object): class Volume(object): - def __init__(self, id, volume_type, status, attachments): + def __init__(self, id, volume_type, status, bootable, + attachments, name=None): self.id = id self.volume_type = volume_type self.status = status + self.bootable = bootable self.attachments = attachments + self.name = name class Image(object): @@ -108,12 +113,14 @@ FakeVolumes = { "vol_id_1": Volume(id="vol_id_1", volume_type="", status="in-use", + bootable="", attachments=[{'server_id': 'vm_id_2', 'attachment_id': '', 'host_name': '', 'volume_id': 'vol_id_1', 'device': '/dev/vdb', - 'id': 'attach_id_1'}]) + 'id': 'attach_id_1'}], + name="vol_id_1_name") } FakeImages = { @@ -123,6 +130,11 @@ FakeImages = { status="active") } +FakeGraphNode = namedtuple("GraphNode", ( + "value", + "child_nodes", +)) + class FakeNovaClient(object): class Servers(object): @@ -167,6 +179,9 @@ class FakeCinderClient(object): def get(self, volume_id): return FakeVolumes[volume_id] + def list(self, detailed=True, search_opts=None, limit=None): + return [FakeVolumes['vol_id_1'], ] + def __getattr__(self, item): return None @@ -227,6 +242,15 @@ ResourceNode = collections.namedtuple( class Checkpoint(object): def __init__(self): self.id = "checkpoint_id" + self.graph = [] + + @property + def resource_graph(self): + return self.graph + + @resource_graph.setter + def resource_graph(self, resource_graph): + self.graph = resource_graph def get_resource_bank_section(self, resource_id): return BankSection( @@ -235,6 +259,21 @@ class Checkpoint(object): ) +def call_hooks(operation, checkpoint, resource, context, parameters, **kwargs): + def noop(*args, **kwargs): + pass + + hooks = ( + 'on_prepare_begin', + 'on_prepare_finish', + 'on_main', + 'on_complete', + ) + for hook_name in hooks: + hook = getattr(operation, hook_name, noop) + hook(checkpoint, resource, context, parameters, **kwargs) + + class NovaProtectionPluginTest(base.TestCase): def setUp(self): super(NovaProtectionPluginTest, self).setUp() @@ -264,38 +303,36 @@ class NovaProtectionPluginTest(base.TestCase): self.assertEqual(options_schema, server_plugin_schemas.SAVED_INFO_SCHEMA) - def test_create_backup_without_volumes(self): + @mock.patch('karbor.services.protection.clients.neutron.create') + 
@mock.patch('karbor.services.protection.clients.glance.create') + @mock.patch('karbor.services.protection.clients.nova.create') + @mock.patch('karbor.services.protection.clients.cinder.create') + def test_create_backup_without_volumes(self, mock_cinder_client, + mock_nova_client, + mock_glance_client, + mock_neutron_client): resource = Resource(id="vm_id_1", type=constants.SERVER_RESOURCE_TYPE, name="fake_vm") - resource_node = ResourceNode(value=resource, - child_nodes=[]) - backup_name = "fake_backup" - self.plugin._cinder_client = mock.MagicMock() - self.plugin._cinder_client.return_value = self.cinder_client + protect_operation = self.plugin.get_protect_operation(resource) + mock_cinder_client.return_value = self.cinder_client + mock_nova_client.return_value = self.nova_client + mock_glance_client.return_value = self.glance_client + mock_neutron_client.return_value = self.neutron_client - self.plugin._nova_client = mock.MagicMock() - self.plugin._nova_client.return_value = self.nova_client - - self.plugin._glance_client = mock.MagicMock() - self.plugin._glance_client.return_value = self.glance_client - - self.plugin._neutron_client = mock.MagicMock() - self.plugin._neutron_client.return_value = self.neutron_client - - self.plugin.create_backup(self.cntxt, self.checkpoint, - node=resource_node, - backup_name=backup_name) + call_hooks(protect_operation, self.checkpoint, resource, self.cntxt, + {}) self.assertEqual( - constants.RESOURCE_STATUS_PROTECTING, + constants.RESOURCE_STATUS_AVAILABLE, fake_bank._plugin._objects[ "/resource_data/checkpoint_id/vm_id_1/status"] ) resource_definition = { "resource_id": "vm_id_1", "attach_metadata": {}, + 'boot_metadata': {'boot_device_type': 'volume'}, "server_metadata": { "availability_zone": "nova", "networks": ["network_id_1"], @@ -311,43 +348,42 @@ class NovaProtectionPluginTest(base.TestCase): "/resource_data/checkpoint_id/vm_id_1/metadata"] ) - def test_create_backup_with_volumes(self): + @mock.patch('karbor.services.protection.clients.neutron.create') + @mock.patch('karbor.services.protection.clients.glance.create') + @mock.patch('karbor.services.protection.clients.nova.create') + @mock.patch('karbor.services.protection.clients.cinder.create') + def test_create_backup_with_volumes(self, mock_cinder_client, + mock_nova_client, + mock_glance_client, + mock_neutron_client): vm_resource = Resource(id="vm_id_2", type=constants.SERVER_RESOURCE_TYPE, name="fake_vm") - vol_resource = Resource(id="vol_id_1", - type=constants.VOLUME_RESOURCE_TYPE, - name="fake_vol") - vol_node = ResourceNode(value=vol_resource, - child_nodes=[]) - vm_node = ResourceNode(value=vm_resource, - child_nodes=[vol_node]) - backup_name = "fake_backup" - self.plugin._cinder_client = mock.MagicMock() - self.plugin._cinder_client.return_value = self.cinder_client + protect_operation = self.plugin.get_protect_operation(vm_resource) + mock_cinder_client.return_value = self.cinder_client + mock_nova_client.return_value = self.nova_client + mock_glance_client.return_value = self.glance_client + mock_neutron_client.return_value = self.neutron_client + checkpoint = Checkpoint() + checkpoint.resource_graph = [FakeGraphNode(value=Resource( + type='OS::Nova::Server', id='vm_id_2', name='None'), + child_nodes=[FakeGraphNode(value=Resource( + type='OS::Cinder::Volume', id='vol_id_1', name=None), + child_nodes=())])] - self.plugin._nova_client = mock.MagicMock() - self.plugin._nova_client.return_value = self.nova_client - - self.plugin._glance_client = mock.MagicMock() - 
self.plugin._glance_client.return_value = self.glance_client - - self.plugin._neutron_client = mock.MagicMock() - self.plugin._neutron_client.return_value = self.neutron_client - - self.plugin.create_backup(self.cntxt, self.checkpoint, - node=vm_node, - backup_name=backup_name) + call_hooks(protect_operation, checkpoint, vm_resource, self.cntxt, + {}) self.assertEqual( fake_bank._plugin._objects[ "/resource_data/checkpoint_id/vm_id_2/status"], - constants.RESOURCE_STATUS_PROTECTING + constants.RESOURCE_STATUS_AVAILABLE ) resource_definition = { "resource_id": "vm_id_2", "attach_metadata": {"vol_id_1": "/dev/vdb"}, + 'boot_metadata': {'boot_device_type': 'volume'}, "server_metadata": { "availability_zone": "nova", "networks": ["network_id_2"], @@ -356,6 +392,15 @@ class NovaProtectionPluginTest(base.TestCase): "key_name": None, "security_groups": "default", }, + 'attach_metadata': { + 'vol_id_1': {'attachment_id': '', + 'bootable': '', + 'device': '/dev/vdb', + 'host_name': '', + 'id': 'attach_id_1', + 'server_id': 'vm_id_2', + 'volume_id': 'vol_id_1'} + }, } self.assertEqual( fake_bank._plugin._objects[ @@ -363,12 +408,11 @@ class NovaProtectionPluginTest(base.TestCase): resource_definition ) - def test_delete_backup(self): + @mock.patch('karbor.services.protection.clients.glance.create') + def test_delete_backup(self, mock_glance_client): resource = Resource(id="vm_id_1", type=constants.SERVER_RESOURCE_TYPE, name="fake_vm") - resource_node = ResourceNode(value=resource, - child_nodes=[]) fake_bank._plugin._objects[ "/resource_data/checkpoint_id/vm_id_1/metadata"] = { @@ -390,8 +434,8 @@ class NovaProtectionPluginTest(base.TestCase): } } - self.plugin._glance_client = mock.MagicMock() - self.plugin._glance_client.return_value = self.glance_client + delete_operation = self.plugin.get_delete_operation(resource) + mock_glance_client.return_value = self.glance_client fake_bank._plugin._objects[ "/resource_data/checkpoint_id/vm_id_1/data_0" @@ -400,8 +444,8 @@ class NovaProtectionPluginTest(base.TestCase): "/resource_data/checkpoint_id/vm_id_1/data_1" ] = "image_data_1" - self.plugin.delete_backup(self.cntxt, self.checkpoint, - node=resource_node) + call_hooks(delete_operation, self.checkpoint, resource, self.cntxt, + {}) def test_get_supported_resources_types(self): types = self.plugin.get_supported_resources_types()
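For reference, the reworked tests above drive each Operation object through the call_hooks helper rather than calling plugin.create_backup/delete_backup directly. A minimal, self-contained sketch of that hook-driven pattern, assuming only the hook names used in the patch (the Operation base class, the bank section object, and the status strings below are simplified stand-ins, not karbor's real classes):

    class Operation(object):
        # Simplified stand-in for protection_plugin.Operation: every hook
        # is a no-op unless a subclass overrides it.
        def on_prepare_begin(self, checkpoint, resource, context,
                             parameters, **kwargs):
            pass

        def on_prepare_finish(self, checkpoint, resource, context,
                              parameters, **kwargs):
            pass

        def on_main(self, checkpoint, resource, context, parameters,
                    **kwargs):
            pass

        def on_complete(self, checkpoint, resource, context, parameters,
                        **kwargs):
            pass

    class ExampleDeleteOperation(Operation):
        # Mirrors DeleteOperation above: delete every bank object except
        # 'status', updating the status marker around the work.
        def on_main(self, checkpoint, resource, context, parameters,
                    **kwargs):
            section = checkpoint.get_resource_bank_section(resource.id)
            section.update_object("status", "deleting")
            for obj in section.list_objects():
                if obj != "status":
                    section.delete_object(obj)
            section.update_object("status", "deleted")

    def call_hooks(operation, checkpoint, resource, context, parameters,
                   **kwargs):
        # Same shape as the helper added in the unit tests: run the hooks
        # in protection lifecycle order.
        for hook_name in ('on_prepare_begin', 'on_prepare_finish',
                          'on_main', 'on_complete'):
            getattr(operation, hook_name)(checkpoint, resource, context,
                                          parameters, **kwargs)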