From ef4536ea98a381692f52eaac72c999ed816f01a8 Mon Sep 17 00:00:00 2001
From: Sergey Reshetnyak
Date: Thu, 4 Jul 2013 18:54:42 +0400
Subject: [PATCH] Add support for attaching Cinder volumes on cluster scaling

Partially implements blueprint attach-cinder-volume-scaling-support

Change-Id: I54d23195c208ccd808d83c1d44a45014df5d3bae
---
 savanna/service/instances.py | 26 ++++++++++++++++----------
 savanna/service/volumes.py   | 30 ++++++++++++++++++++++--------
 2 files changed, 38 insertions(+), 18 deletions(-)

diff --git a/savanna/service/instances.py b/savanna/service/instances.py
index 11c33c76..f5e46941 100644
--- a/savanna/service/instances.py
+++ b/savanna/service/instances.py
@@ -62,6 +62,7 @@ def scale_cluster(cluster, node_group_names_map):
         instances_list = _scale_cluster_instances(
             cluster, node_groups_map)
         _await_instances(cluster)
+        volumes.attach_to_instances(instances_list)
     except Exception as ex:
         LOG.warn("Can't scale cluster: %s", ex)
         with excutils.save_and_reraise_exception():
@@ -261,16 +262,21 @@ def _rollback_cluster_creation(cluster, ex):
 
 
 def _rollback_cluster_scaling(instances):
-    # if some nodes are up we should shut them down and update "count" in
-    # node_group
-    ng_to_delete = []
-    for i in instances:
-        ng = i.node_group
-        _shutdown_instance(i)
-        ng.count -= 1
-        if ng.count == 0:
-            ng_to_delete.append(ng)
-    return ng_to_delete
+    try:
+        volumes.detach_from_instances(instances)
+    except Exception:
+        raise
+    finally:
+        # if some nodes are up we should shut them down and update "count" in
+        # node_group
+        ng_to_delete = []
+        for i in instances:
+            ng = i.node_group
+            _shutdown_instance(i)
+            ng.count -= 1
+            if ng.count == 0:
+                ng_to_delete.append(ng)
+        return ng_to_delete
 
 
 def _shutdown_instances(cluster, quiet=False):
diff --git a/savanna/service/volumes.py b/savanna/service/volumes.py
index fccf2410..b0a15128 100644
--- a/savanna/service/volumes.py
+++ b/savanna/service/volumes.py
@@ -30,6 +30,11 @@ def attach(cluster):
             _attach_volumes_to_node(node_group, instance)
 
 
+def attach_to_instances(instances):
+    for instance in instances:
+        _attach_volumes_to_node(instance.node_group, instance)
+
+
 def _await_attach_volume(instance, device_path):
     timeout = 10
     for _ in six.moves.xrange(timeout):
@@ -124,11 +129,20 @@ def _mount_volume(instance, device_path, mount_point):
 def detach(cluster):
     for node_group in cluster.node_groups:
         for instance in node_group.instances:
-            for volume_id in instance.volumes:
-                volume = cinder.get_volume(volume_id)
-                try:
-                    volume.detach()
-                    volume.delete()
-                except Exception:
-                    LOG.error("Can't detach volume %s" % volume.id)
-                    raise
+            _detach_volume_from_instance(instance)
+
+
+def detach_from_instances(instances):
+    for instance in instances:
+        _detach_volume_from_instance(instance)
+
+
+def _detach_volume_from_instance(instance):
+    for volume_id in instance.volumes:
+        volume = cinder.get_volume(volume_id)
+        try:
+            volume.detach()
+            volume.delete()
+        except Exception:
+            LOG.error("Can't detach volume %s" % volume.id)
+            raise
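
A note on the new _rollback_cluster_scaling control flow: Python discards an
in-flight exception when a "return" executes inside "finally", so a failure
raised by volumes.detach_from_instances() (including the one re-raised by
"except Exception: raise") never reaches the caller, which makes that except
clause effectively dead code. A minimal standalone sketch of this behavior;
the names here are hypothetical stand-ins, not from the patch:

    def _detach():
        # stand-in for volumes.detach_from_instances()
        raise RuntimeError("detach failed")

    def rollback():
        try:
            _detach()
        except Exception:
            raise  # re-raised here...
        finally:
            return "rolled back"  # ...but this return discards the exception

    print(rollback())  # prints "rolled back"; the RuntimeError never escapes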
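
If the goal were instead to let a detach failure propagate once the new
instances have been shut down, the excutils helper that scale_cluster already
uses supports that pattern. A hypothetical variant, not what this patch does,
assuming the module-level names from savanna/service/instances.py (volumes,
LOG, excutils, _shutdown_instance):

    def _rollback_cluster_scaling(instances):
        ng_to_delete = []
        try:
            volumes.detach_from_instances(instances)
        except Exception:
            # log now; save_and_reraise_exception re-raises on block exit
            with excutils.save_and_reraise_exception():
                LOG.warn("Can't detach volumes while rolling back scaling")
        finally:
            # shut the instances down in all cases and update "count" in
            # node_group
            for i in instances:
                ng = i.node_group
                _shutdown_instance(i)
                ng.count -= 1
                if ng.count == 0:
                    ng_to_delete.append(ng)
        return ng_to_delete

The trade-off: on a detach failure this variant raises instead of returning
the node-group list, so the caller's error path must not depend on the return
value.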