From 50cd1f5f9db25bab7b9502d20692c78bc3b13521 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adrien=20Verg=C3=A9?=
Date: Fri, 19 Sep 2014 15:13:41 +0200
Subject: [PATCH] Support Cinder availability zones

Add a new 'volumes_availability_zone' option for node groups that
enables creation of volumes in a specific Cinder AZ.

Change-Id: Ia17d1628c7d5d06f0924edc8f92a0d26fff143e0
Implements: blueprint support-cinder-availability-zones
---
 sahara/conductor/manager.py                        |  1 +
 sahara/conductor/objects.py                        |  3 ++
 .../versions/013_volumes_availability_zone.py      | 46 +++++++++++++++++++
 sahara/db/sqlalchemy/models.py                     |  3 ++
 sahara/resources/volume.heat                       |  1 +
 sahara/service/validations/base.py                 | 15 +++++-
 .../validations/node_group_templates.py            |  3 ++
 sahara/service/volumes.py                          | 10 +++-
 .../unit/conductor/manager/test_clusters.py        |  1 +
 .../unit/conductor/manager/test_templates.py       |  1 +
 sahara/tests/unit/service/test_volumes.py          |  1 +
 .../test_cluster_create_validation.py              | 35 +++++++++++++-
 sahara/tests/unit/service/validation/utils.py      |  6 ++-
 sahara/tests/unit/testutils.py                     |  3 +-
 sahara/utils/openstack/heat.py                     | 14 +++++-
 15 files changed, 134 insertions(+), 9 deletions(-)
 create mode 100644 sahara/db/migration/alembic_migrations/versions/013_volumes_availability_zone.py

diff --git a/sahara/conductor/manager.py b/sahara/conductor/manager.py
index 7ffbb932..4ce40078 100644
--- a/sahara/conductor/manager.py
+++ b/sahara/conductor/manager.py
@@ -37,6 +37,7 @@ NODE_GROUP_DEFAULTS = {
     "node_configs": {},
     "volumes_per_node": 0,
     "volumes_size": 0,
+    "volumes_availability_zone": None,
     "volume_mount_prefix": "/volumes/disk",
     "floating_ip_pool": None,
     "security_groups": None,
diff --git a/sahara/conductor/objects.py b/sahara/conductor/objects.py
index 8781ad44..01a315bc 100644
--- a/sahara/conductor/objects.py
+++ b/sahara/conductor/objects.py
@@ -74,6 +74,8 @@ class NodeGroup(object):
                    see the docs for details
     volumes_per_node
     volumes_size
+    volumes_availability_zone - name of the Cinder availability zone
+                                in which to spawn volumes
     volume_mount_prefix
     floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
                        instances in this Node Group
@@ -174,6 +176,7 @@ class NodeGroupTemplate(object):
                    see the docs for details
     volumes_per_node
     volumes_size
+    volumes_availability_zone
     volume_mount_prefix
     floating_ip_pool
     security_groups
diff --git a/sahara/db/migration/alembic_migrations/versions/013_volumes_availability_zone.py b/sahara/db/migration/alembic_migrations/versions/013_volumes_availability_zone.py
new file mode 100644
index 00000000..8954f873
--- /dev/null
+++ b/sahara/db/migration/alembic_migrations/versions/013_volumes_availability_zone.py
@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, Adrien Vergé
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""add volumes_availability_zone field to node groups
+
+Revision ID: 013
+Revises: 012
+Create Date: 2014-09-08 15:37:00.000000
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '013'
+down_revision = '012'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('node_group_templates',
+                  sa.Column('volumes_availability_zone',
+                            sa.String(length=255)))
+    op.add_column('node_groups', sa.Column('volumes_availability_zone',
+                                           sa.String(length=255)))
+    op.add_column('templates_relations', sa.Column('volumes_availability_zone',
+                                                   sa.String(length=255)))
+
+
+def downgrade():
+    op.drop_column('node_group_templates', 'volumes_availability_zone')
+    op.drop_column('node_groups', 'volumes_availability_zone')
+    op.drop_column('templates_relations', 'volumes_availability_zone')
diff --git a/sahara/db/sqlalchemy/models.py b/sahara/db/sqlalchemy/models.py
index 7770ad24..986ae8e5 100644
--- a/sahara/db/sqlalchemy/models.py
+++ b/sahara/db/sqlalchemy/models.py
@@ -99,6 +99,7 @@ class NodeGroup(mb.SaharaBase):
     node_configs = sa.Column(st.JsonDictType())
     volumes_per_node = sa.Column(sa.Integer)
     volumes_size = sa.Column(sa.Integer)
+    volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     count = sa.Column(sa.Integer, nullable=False)
     instances = relationship('Instance', cascade="all,delete",
@@ -194,6 +195,7 @@ class NodeGroupTemplate(mb.SaharaBase):
     node_configs = sa.Column(st.JsonDictType())
     volumes_per_node = sa.Column(sa.Integer, nullable=False)
     volumes_size = sa.Column(sa.Integer)
+    volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     floating_ip_pool = sa.Column(sa.String(36))
     security_groups = sa.Column(st.JsonListType())
@@ -218,6 +220,7 @@ class TemplatesRelation(mb.SaharaBase):
     node_configs = sa.Column(st.JsonDictType())
     volumes_per_node = sa.Column(sa.Integer)
     volumes_size = sa.Column(sa.Integer)
+    volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     count = sa.Column(sa.Integer, nullable=False)
     cluster_template_id = sa.Column(sa.String(36),
diff --git a/sahara/resources/volume.heat b/sahara/resources/volume.heat
index a214549c..ac702ade 100644
--- a/sahara/resources/volume.heat
+++ b/sahara/resources/volume.heat
@@ -2,6 +2,7 @@
     "Type" : "OS::Cinder::Volume",
     "Properties" : {
         "name" : "%(volume_name)s",
+        %(availability_zone)s
         "size" : "%(volumes_size)s"
     }
 },
diff --git a/sahara/service/validations/base.py b/sahara/service/validations/base.py
index aaae8e87..660e5640 100644
--- a/sahara/service/validations/base.py
+++ b/sahara/service/validations/base.py
@@ -25,6 +25,7 @@ from sahara.i18n import _
 import sahara.plugins.base as plugin_base
 import sahara.service.api as api
 from sahara.utils import general as g
+import sahara.utils.openstack.cinder as cinder
 import sahara.utils.openstack.heat as heat
 import sahara.utils.openstack.keystone as keystone
 import sahara.utils.openstack.nova as nova
@@ -130,6 +131,9 @@
 
     if ng.get('volumes_per_node'):
         check_cinder_exists()
+    if ng.get('volumes_availability_zone'):
+        check_volume_availability_zone_exist(
+            ng['volumes_availability_zone'])
 
     if ng.get('floating_ip_pool'):
         check_floatingip_pool_exists(ng['name'], ng['floating_ip_pool'])
@@ -209,7 +213,16 @@ def check_availability_zone_exist(az):
     az_list = nova.client().availability_zones.list(False)
     az_names = [a.zoneName for a in az_list]
     if az not in az_names:
-        raise ex.InvalidException(_("Availability zone '%s' not found") % az)
+        raise ex.InvalidException(_("Nova availability zone '%s' not found")
+                                  % az)
+
+
+def check_volume_availability_zone_exist(az):
+    az_list = cinder.client().availability_zones.list()
+    az_names = [a.zoneName for a in az_list]
+    if az not in az_names:
+        raise ex.InvalidException(_("Cinder availability zone '%s' not found")
+                                  % az)
 
 
 # Cluster creation related checks
diff --git a/sahara/service/validations/node_group_templates.py b/sahara/service/validations/node_group_templates.py
index 77c641f5..e5dcfa3b 100644
--- a/sahara/service/validations/node_group_templates.py
+++ b/sahara/service/validations/node_group_templates.py
@@ -59,6 +59,9 @@ NODE_GROUP_TEMPLATE_SCHEMA = {
             "type": "integer",
             "minimum": 1,
         },
+        "volumes_availability_zone": {
+            "type": "string",
+        },
         "volume_mount_prefix": {
             "type": "string",
             "format": "posix_path",
diff --git a/sahara/service/volumes.py b/sahara/service/volumes.py
index 7362db76..7d777cd6 100644
--- a/sahara/service/volumes.py
+++ b/sahara/service/volumes.py
@@ -71,7 +71,8 @@
     for idx in range(1, node_group.volumes_per_node + 1):
         display_name = "volume_" + instance.instance_name + "_" + str(idx)
         device = _create_attach_volume(
-            ctx, instance, size, display_name)
+            ctx, instance, size, display_name,
+            node_group.volumes_availability_zone)
         devices.append(device)
         LOG.debug("Attached volume %s to instance %s" %
                   (device, instance.instance_id))
@@ -81,11 +82,16 @@
     _mount_volumes_to_node(instance, devices)
 
 
-def _create_attach_volume(ctx, instance, size, name=None):
+def _create_attach_volume(ctx, instance, size, name=None,
+                          availability_zone=None):
     if CONF.cinder_api_version == 1:
         kwargs = {'size': size, 'display_name': name}
     else:
         kwargs = {'size': size, 'name': name}
+
+    if availability_zone is not None:
+        kwargs['availability_zone'] = availability_zone
+
     volume = cinder.client().volumes.create(**kwargs)
 
     conductor.append_volume(ctx, instance, volume.id)
diff --git a/sahara/tests/unit/conductor/manager/test_clusters.py b/sahara/tests/unit/conductor/manager/test_clusters.py
index 3cfc743e..b569cdd7 100644
--- a/sahara/tests/unit/conductor/manager/test_clusters.py
+++ b/sahara/tests/unit/conductor/manager/test_clusters.py
@@ -114,6 +114,7 @@ class ClusterTest(test_base.ConductorManagerTestCase):
             ng.pop("volume_mount_prefix")
             ng.pop("volumes_size")
             ng.pop("volumes_per_node")
+            ng.pop("volumes_availability_zone")
             ng.pop("floating_ip_pool")
             ng.pop("image_username")
             ng.pop("open_ports")
diff --git a/sahara/tests/unit/conductor/manager/test_templates.py b/sahara/tests/unit/conductor/manager/test_templates.py
index cc920934..ca53dc0d 100644
--- a/sahara/tests/unit/conductor/manager/test_templates.py
+++ b/sahara/tests/unit/conductor/manager/test_templates.py
@@ -189,6 +189,7 @@ class ClusterTemplates(test_base.ConductorManagerTestCase):
             ng.pop("volume_mount_prefix")
             ng.pop("volumes_size")
             ng.pop("volumes_per_node")
+            ng.pop("volumes_availability_zone")
             ng.pop("auto_security_group")
 
         self.assertEqual(SAMPLE_CLT["node_groups"],
diff --git a/sahara/tests/unit/service/test_volumes.py b/sahara/tests/unit/service/test_volumes.py
index 49614e28..7ee66116 100644
--- a/sahara/tests/unit/service/test_volumes.py
+++ b/sahara/tests/unit/service/test_volumes.py
@@ -96,6 +96,7 @@ class TestAttachVolume(base.SaharaWithDbTestCase):
 
         ng = {'volumes_per_node': 2,
               'volumes_size': 2,
+              'volumes_availability_zone': None,
               'volume_mount_prefix': '/mnt/vols',
               'name': 'master',
               'instances': [instance1, instance2]}
diff --git a/sahara/tests/unit/service/validation/test_cluster_create_validation.py b/sahara/tests/unit/service/validation/test_cluster_create_validation.py
index 28c7c3ed..7ccc3140 100644
--- a/sahara/tests/unit/service/validation/test_cluster_create_validation.py
+++ b/sahara/tests/unit/service/validation/test_cluster_create_validation.py
@@ -357,7 +357,9 @@
                         'security_groups': [],
                         'floating_ip_pool':
                             'd9a3bebc-f788-4b81-9a93-aa048022c1ca',
-                        'availability_zone': 'nova'
+                        'availability_zone': 'nova',
+                        'volumes_per_node': 1,
+                        'volumes_availability_zone': 'nova'
                     }
                 ]
            }
@@ -388,7 +390,36 @@
                 ]
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
-                       "Availability zone 'nonexistent' not found")
+                       "Nova availability zone 'nonexistent' not found")
+        )
+
+    def test_cluster_create_wrong_volumes_availability_zone(self):
+        self.override_config('use_neutron', True)
+        self._assert_create_object_validation(
+            data={
+                'name': 'testname',
+                'plugin_name': 'vanilla',
+                'hadoop_version': '1.2.1',
+                'user_keypair_id': 'test_keypair',
+                'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
+                'neutron_management_network': 'd9a3bebc-f788-4b81-'
+                                              '9a93-aa048022c1ca',
+                'node_groups': [
+                    {
+                        'name': 'nodegroup',
+                        'node_processes': ['namenode'],
+                        'flavor_id': '42',
+                        'count': 100,
+                        'security_groups': [],
+                        'floating_ip_pool':
+                            'd9a3bebc-f788-4b81-9a93-aa048022c1ca',
+                        'volumes_per_node': 1,
+                        'volumes_availability_zone': 'nonexistent'
+                    }
+                ]
+            },
+            bad_req_i=(1, 'INVALID_REFERENCE',
+                       "Cinder availability zone 'nonexistent' not found")
         )
diff --git a/sahara/tests/unit/service/validation/utils.py b/sahara/tests/unit/service/validation/utils.py
index 57f72edb..861611f3 100644
--- a/sahara/tests/unit/service/validation/utils.py
+++ b/sahara/tests/unit/service/validation/utils.py
@@ -132,6 +132,7 @@ def start_patch(patch_templates=True):
     nova_p = mock.patch("sahara.utils.openstack.nova.client")
     keystone_p = mock.patch("sahara.utils.openstack.keystone._client")
     heat_p = mock.patch("sahara.utils.openstack.heat.client")
+    cinder_p = mock.patch("sahara.utils.openstack.cinder.client")
 
     get_image_p = mock.patch("sahara.service.api.get_image")
     get_image = get_image_p.start()
@@ -160,6 +161,9 @@ def start_patch(patch_templates=True):
     heat = heat_p.start()
     heat().stacks.list.side_effect = _get_heat_stack_list
 
+    cinder = cinder_p.start()
+    cinder().availability_zones.list.side_effect = _get_availability_zone_list
+
     class Service(object):
         @property
         def name(self):
@@ -228,7 +232,7 @@ def start_patch(patch_templates=True):
         get_ng_template.side_effect = _get_ng_template
     # request data to validate
     patchers = [get_clusters_p, get_cluster_p,
-                nova_p, keystone_p, get_image_p, heat_p]
+                nova_p, keystone_p, get_image_p, heat_p, cinder_p]
     if patch_templates:
         patchers.extend([get_ng_template_p, get_ng_templates_p,
                          get_cl_template_p, get_cl_templates_p])
diff --git a/sahara/tests/unit/testutils.py b/sahara/tests/unit/testutils.py
index 32c3785d..1625ebb0 100644
--- a/sahara/tests/unit/testutils.py
+++ b/sahara/tests/unit/testutils.py
@@ -35,7 +35,8 @@ def make_ng_dict(name, flavor, processes, count, instances=None, **kwargs):
     dct = {'name': name, 'flavor_id': flavor, 'node_processes': processes,
            'count': count, 'instances': instances, 'node_configs': {},
           'security_groups': None, 'auto_security_group': False,
-           'availability_zone': None, 'open_ports': []}
+           'availability_zone': None, 'volumes_availability_zone': None,
+           'open_ports': []}
     dct.update(kwargs)
     return dct
diff --git a/sahara/utils/openstack/heat.py b/sahara/utils/openstack/heat.py
index 3a1d550f..4704aa52 100644
--- a/sahara/utils/openstack/heat.py
+++ b/sahara/utils/openstack/heat.py
@@ -243,7 +243,8 @@
             yield _load_template('instance.heat', fields)
 
             for idx in range(0, ng.volumes_per_node):
-                yield self._serialize_volume(inst_name, idx, ng.volumes_size)
+                yield self._serialize_volume(inst_name, idx, ng.volumes_size,
+                                             ng.volumes_availability_zone)
 
     def _serialize_port(self, port_name, fixed_net_id, security_groups):
         fields = {'port_name': port_name,
@@ -271,13 +272,22 @@
 
         return _load_template('nova-floating.heat', fields)
 
-    def _serialize_volume(self, inst_name, volume_idx, volumes_size):
+    def _serialize_volume(self, inst_name, volume_idx, volumes_size,
+                          volumes_availability_zone):
         fields = {'volume_name': _get_volume_name(inst_name, volume_idx),
                   'volumes_size': volumes_size,
                   'volume_attach_name': _get_volume_attach_name(inst_name,
                                                                 volume_idx),
+                  'availability_zone': '',
                   'instance_name': inst_name}
 
+        if volumes_availability_zone:
+            # Use json.dumps to escape volumes_availability_zone
+            # (in case it contains quotes)
+            fields['availability_zone'] = (
+                '"availability_zone": %s,' %
+                json.dumps(volumes_availability_zone))
+
         return _load_template('volume.heat', fields)
 
     def _get_security_groups(self, node_group):
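
Illustration (not part of the patch): the volume.heat fragment above carries a
%(availability_zone)s placeholder that ClusterTemplate._serialize_volume fills
with either an empty string or a '"availability_zone": ...,' property. Below is
a minimal standalone Python sketch of that behavior; VOLUME_HEAT is a trimmed
copy of sahara/resources/volume.heat, and render_volume() is a hypothetical
helper standing in for _load_template().

import json

# Trimmed copy of sahara/resources/volume.heat, for illustration only.
VOLUME_HEAT = '''{
    "Type" : "OS::Cinder::Volume",
    "Properties" : {
        "name" : "%(volume_name)s",
        %(availability_zone)s
        "size" : "%(volumes_size)s"
    }
}'''


def render_volume(volume_name, volumes_size, volumes_availability_zone=None):
    fields = {'volume_name': volume_name,
              'volumes_size': volumes_size,
              # Empty by default: the property is simply omitted and Cinder
              # falls back to its configured default availability zone.
              'availability_zone': ''}
    if volumes_availability_zone:
        # json.dumps escapes the zone name (e.g. embedded quotes), mirroring
        # ClusterTemplate._serialize_volume in the patch.
        fields['availability_zone'] = (
            '"availability_zone": %s,' %
            json.dumps(volumes_availability_zone))
    return VOLUME_HEAT % fields


print(render_volume('volume_master-001_1', 10, 'nova'))

Rendering with 'nova' yields a valid OS::Cinder::Volume resource containing
"availability_zone": "nova"; rendering with None leaves the property out, so
existing templates are unaffected.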