diff --git a/sahara/conductor/manager.py b/sahara/conductor/manager.py
index 4ce40078..94f278ca 100644
--- a/sahara/conductor/manager.py
+++ b/sahara/conductor/manager.py
@@ -39,6 +39,7 @@ NODE_GROUP_DEFAULTS = {
     "volumes_size": 0,
     "volumes_availability_zone": None,
     "volume_mount_prefix": "/volumes/disk",
+    "volume_type": None,
     "floating_ip_pool": None,
     "security_groups": None,
     "auto_security_group": False,
diff --git a/sahara/conductor/objects.py b/sahara/conductor/objects.py
index 01a315bc..c470e580 100644
--- a/sahara/conductor/objects.py
+++ b/sahara/conductor/objects.py
@@ -77,6 +77,7 @@ class NodeGroup(object):
     volumes_availability_zone - name of Cinder availability zone where to
                                 spawn volumes
     volume_mount_prefix
+    volume_type
     floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
                        instances in this Node Group
     security_groups - List of security groups for instances in this Node Group
@@ -178,6 +179,7 @@ class NodeGroupTemplate(object):
     volumes_size
     volumes_availability_zone
     volume_mount_prefix
+    volume_type
     floating_ip_pool
     security_groups
     auto_security_group
diff --git a/sahara/db/migration/alembic_migrations/versions/014_add_volume_type.py b/sahara/db/migration/alembic_migrations/versions/014_add_volume_type.py
new file mode 100644
index 00000000..544214e1
--- /dev/null
+++ b/sahara/db/migration/alembic_migrations/versions/014_add_volume_type.py
@@ -0,0 +1,47 @@
+# Copyright 2014 OpenStack Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""add_volume_type
+
+Revision ID: 014
+Revises: 013
+Create Date: 2014-10-09 12:47:17.871520
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '014'
+down_revision = '013'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('node_group_templates',
+                  sa.Column('volume_type', sa.String(length=255),
+                            nullable=True))
+    op.add_column('node_groups',
+                  sa.Column('volume_type', sa.String(length=255),
+                            nullable=True))
+    op.add_column('templates_relations',
+                  sa.Column('volume_type', sa.String(length=255),
+                            nullable=True))
+
+
+def downgrade():
+    op.drop_column('templates_relations', 'volume_type')
+    op.drop_column('node_groups', 'volume_type')
+    op.drop_column('node_group_templates', 'volume_type')
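
As a sanity check on what this migration does, the sketch below replays the
equivalent DDL against a throwaway in-memory SQLite database. It is
illustrative only, not Sahara code: the one-column tables stand in for the
real schemas.

import sqlalchemy as sa

engine = sa.create_engine('sqlite://')
with engine.begin() as conn:
    for table in ('node_group_templates', 'node_groups',
                  'templates_relations'):
        # Placeholder table; the real ones carry the full node group schema.
        conn.execute(sa.text('CREATE TABLE %s (id VARCHAR(36))' % table))
        # The effect of op.add_column(table, sa.Column('volume_type',
        # sa.String(length=255), nullable=True)):
        conn.execute(sa.text(
            'ALTER TABLE %s ADD COLUMN volume_type VARCHAR(255)' % table))

Rolling back simply drops the three columns again, which is why downgrade()
is the exact mirror of upgrade().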
diff --git a/sahara/db/sqlalchemy/models.py b/sahara/db/sqlalchemy/models.py
index 986ae8e5..70710086 100644
--- a/sahara/db/sqlalchemy/models.py
+++ b/sahara/db/sqlalchemy/models.py
@@ -101,6 +101,7 @@ class NodeGroup(mb.SaharaBase):
     volumes_size = sa.Column(sa.Integer)
     volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
+    volume_type = sa.Column(sa.String(255))
     count = sa.Column(sa.Integer, nullable=False)
     instances = relationship('Instance', cascade="all,delete",
                              backref='node_group',
@@ -197,6 +198,7 @@ class NodeGroupTemplate(mb.SaharaBase):
     volumes_size = sa.Column(sa.Integer)
     volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
+    volume_type = sa.Column(sa.String(255))
     floating_ip_pool = sa.Column(sa.String(36))
     security_groups = sa.Column(st.JsonListType())
     auto_security_group = sa.Column(sa.Boolean())
@@ -222,6 +224,7 @@ class TemplatesRelation(mb.SaharaBase):
     volumes_size = sa.Column(sa.Integer)
     volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
+    volume_type = sa.Column(sa.String(255))
     count = sa.Column(sa.Integer, nullable=False)
     cluster_template_id = sa.Column(sa.String(36),
                                     sa.ForeignKey('cluster_templates.id'))
diff --git a/sahara/resources/volume.heat b/sahara/resources/volume.heat
index ac702ade..a4323728 100644
--- a/sahara/resources/volume.heat
+++ b/sahara/resources/volume.heat
@@ -3,7 +3,8 @@
     "Properties" : {
         "name" : "%(volume_name)s",
         %(availability_zone)s
-        "size" : "%(volumes_size)s"
+        "size" : "%(volumes_size)s",
+        %(volume_type)s
     }
 },
 "%(volume_attach_name)s" : {
diff --git a/sahara/service/validations/base.py b/sahara/service/validations/base.py
index 660e5640..e58b699a 100644
--- a/sahara/service/validations/base.py
+++ b/sahara/service/validations/base.py
@@ -135,6 +135,9 @@ def check_node_group_basic_fields(plugin_name, hadoop_version, ng,
         check_volume_availability_zone_exist(
             ng['volumes_availability_zone'])
 
+    if ng.get('volume_type'):
+        check_volume_type_exists(ng['volume_type'])
+
     if ng.get('floating_ip_pool'):
         check_floatingip_pool_exists(ng['name'], ng['floating_ip_pool'])
 
@@ -225,6 +228,14 @@ def check_volume_availability_zone_exist(az):
                                  % az)
 
 
+def check_volume_type_exists(volume_type):
+    volume_types = cinder.client().volume_types.list(
+        search_opts={'name': volume_type})
+    if len(volume_types) == 1 and volume_types[0].name == volume_type:
+        return
+    raise ex.NotFoundException(_("Volume type '%s' not found") % volume_type)
+
+
 # Cluster creation related checks
 
 def check_cluster_unique_name(name):
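
The validation added above accepts a node group's volume_type only when
Cinder reports exactly one volume type whose name matches (cinderclient
returns VolumeType objects, hence the .name comparison). The stand-alone
sketch below mirrors that matching rule; FakeVolumeType and
volume_type_exists are hypothetical stand-ins for illustration, not Sahara
code.

class FakeVolumeType(object):
    """Stand-in for the objects cinderclient's volume_types.list() returns."""
    def __init__(self, name):
        self.name = name


def volume_type_exists(found, requested):
    # Mirrors check_volume_type_exists: exactly one result, matched by name.
    return len(found) == 1 and found[0].name == requested


assert volume_type_exists([FakeVolumeType('vol_type')], 'vol_type')
assert not volume_type_exists([], 'vol_type')
assert not volume_type_exists(
    [FakeVolumeType('vol_type'), FakeVolumeType('vol_type')], 'vol_type')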
"integer", "minimum": 1, }, + "volume_type": { + "type": "string" + }, "volumes_availability_zone": { "type": "string", }, diff --git a/sahara/service/volumes.py b/sahara/service/volumes.py index 7d777cd6..4531ad78 100644 --- a/sahara/service/volumes.py +++ b/sahara/service/volumes.py @@ -67,11 +67,12 @@ def _await_attach_volumes(instance, devices): def _attach_volumes_to_node(node_group, instance): ctx = context.ctx() size = node_group.volumes_size + volume_type = node_group.volume_type devices = [] for idx in range(1, node_group.volumes_per_node + 1): display_name = "volume_" + instance.instance_name + "_" + str(idx) device = _create_attach_volume( - ctx, instance, size, display_name, + ctx, instance, size, volume_type, display_name, node_group.volumes_availability_zone) devices.append(device) LOG.debug("Attached volume %s to instance %s" % @@ -82,13 +83,14 @@ def _attach_volumes_to_node(node_group, instance): _mount_volumes_to_node(instance, devices) -def _create_attach_volume(ctx, instance, size, name=None, +def _create_attach_volume(ctx, instance, size, volume_type, name=None, availability_zone=None): if CONF.cinder_api_version == 1: kwargs = {'size': size, 'display_name': name} else: kwargs = {'size': size, 'name': name} + kwargs['volume_type'] = volume_type if availability_zone is not None: kwargs['availability_zone'] = availability_zone diff --git a/sahara/tests/unit/conductor/manager/test_clusters.py b/sahara/tests/unit/conductor/manager/test_clusters.py index b569cdd7..4e719c91 100644 --- a/sahara/tests/unit/conductor/manager/test_clusters.py +++ b/sahara/tests/unit/conductor/manager/test_clusters.py @@ -115,6 +115,7 @@ class ClusterTest(test_base.ConductorManagerTestCase): ng.pop("volumes_size") ng.pop("volumes_per_node") ng.pop("volumes_availability_zone") + ng.pop("volume_type") ng.pop("floating_ip_pool") ng.pop("image_username") ng.pop("open_ports") diff --git a/sahara/tests/unit/conductor/manager/test_templates.py b/sahara/tests/unit/conductor/manager/test_templates.py index ca53dc0d..61254f06 100644 --- a/sahara/tests/unit/conductor/manager/test_templates.py +++ b/sahara/tests/unit/conductor/manager/test_templates.py @@ -190,6 +190,7 @@ class ClusterTemplates(test_base.ConductorManagerTestCase): ng.pop("volumes_size") ng.pop("volumes_per_node") ng.pop("volumes_availability_zone") + ng.pop("volume_type") ng.pop("auto_security_group") self.assertEqual(SAMPLE_CLT["node_groups"], diff --git a/sahara/tests/unit/db/migration/test_migrations.py b/sahara/tests/unit/db/migration/test_migrations.py index 7f1221a9..c0cfa6ff 100644 --- a/sahara/tests/unit/db/migration/test_migrations.py +++ b/sahara/tests/unit/db/migration/test_migrations.py @@ -407,3 +407,8 @@ class TestMigrations(base.BaseWalkMigrationTestCase, base.CommonTestsMixIn): self.assertColumnExists(engine, 'node_groups', 'availability_zone') self.assertColumnExists(engine, 'templates_relations', 'availability_zone') + + def _check_014(self, engine, data): + self.assertColumnExists(engine, 'node_group_templates', 'volume_type') + self.assertColumnExists(engine, 'node_groups', 'volume_type') + self.assertColumnExists(engine, 'templates_relations', 'volume_type') diff --git a/sahara/tests/unit/resources/test_serialize_resources_use_neutron.heat b/sahara/tests/unit/resources/test_serialize_resources_use_neutron.heat index 6bdd7785..7645f440 100644 --- a/sahara/tests/unit/resources/test_serialize_resources_use_neutron.heat +++ b/sahara/tests/unit/resources/test_serialize_resources_use_neutron.heat @@ -35,7 +35,8 @@ "Type" : 
"OS::Cinder::Volume", "Properties" : { "name" : "cluster-worker-001-volume-0", - "size" : "10" + "size" : "10", + "volume_type" : "vol_type" } }, "cluster-worker-001-volume-attachment-0" : { @@ -50,7 +51,8 @@ "Type" : "OS::Cinder::Volume", "Properties" : { "name" : "cluster-worker-001-volume-1", - "size" : "10" + "size" : "10", + "volume_type": "vol_type" } }, "cluster-worker-001-volume-attachment-1" : { diff --git a/sahara/tests/unit/resources/test_serialize_resources_use_nn_with_autoassignment.heat b/sahara/tests/unit/resources/test_serialize_resources_use_nn_with_autoassignment.heat index e49f5a8b..7cabbe6e 100644 --- a/sahara/tests/unit/resources/test_serialize_resources_use_nn_with_autoassignment.heat +++ b/sahara/tests/unit/resources/test_serialize_resources_use_nn_with_autoassignment.heat @@ -20,7 +20,8 @@ "Type" : "OS::Cinder::Volume", "Properties" : { "name" : "cluster-worker-001-volume-0", - "size" : "10" + "size" : "10", + "volume_type": null } }, "cluster-worker-001-volume-attachment-0" : { @@ -35,7 +36,8 @@ "Type" : "OS::Cinder::Volume", "Properties" : { "name" : "cluster-worker-001-volume-1", - "size" : "10" + "size" : "10", + "volume_type": null } }, "cluster-worker-001-volume-attachment-1" : { diff --git a/sahara/tests/unit/resources/test_serialize_resources_use_nn_without_autoassignment.heat b/sahara/tests/unit/resources/test_serialize_resources_use_nn_without_autoassignment.heat index e2624259..ac898766 100644 --- a/sahara/tests/unit/resources/test_serialize_resources_use_nn_without_autoassignment.heat +++ b/sahara/tests/unit/resources/test_serialize_resources_use_nn_without_autoassignment.heat @@ -33,7 +33,8 @@ "Type" : "OS::Cinder::Volume", "Properties" : { "name" : "cluster-worker-001-volume-0", - "size" : "10" + "size" : "10", + "volume_type": null } }, "cluster-worker-001-volume-attachment-0" : { @@ -48,7 +49,8 @@ "Type" : "OS::Cinder::Volume", "Properties" : { "name" : "cluster-worker-001-volume-1", - "size" : "10" + "size" : "10", + "volume_type": null } }, "cluster-worker-001-volume-attachment-1" : { diff --git a/sahara/tests/unit/service/test_volumes.py b/sahara/tests/unit/service/test_volumes.py index 7ee66116..8d66cd24 100644 --- a/sahara/tests/unit/service/test_volumes.py +++ b/sahara/tests/unit/service/test_volumes.py @@ -98,6 +98,7 @@ class TestAttachVolume(base.SaharaWithDbTestCase): 'volumes_size': 2, 'volumes_availability_zone': None, 'volume_mount_prefix': '/mnt/vols', + 'volume_type': None, 'name': 'master', 'instances': [instance1, instance2]} diff --git a/sahara/tests/unit/utils/test_heat.py b/sahara/tests/unit/utils/test_heat.py index 788d4ea2..7193aab4 100644 --- a/sahara/tests/unit/utils/test_heat.py +++ b/sahara/tests/unit/utils/test_heat.py @@ -58,15 +58,15 @@ class TestClusterTemplate(base.SaharaWithDbTestCase): into Heat templates. 
""" - def _make_node_groups(self, floating_ip_pool=None): + def _make_node_groups(self, floating_ip_pool=None, volume_type=None): ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1, floating_ip_pool=floating_ip_pool, image_id=None, volumes_per_node=0, volumes_size=0, id=1, - image_username='root') + image_username='root', volume_type=None) ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1, floating_ip_pool=floating_ip_pool, image_id=None, volumes_per_node=2, volumes_size=10, id=2, - image_username='root') + image_username='root', volume_type=volume_type) return ng1, ng2 def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=[]): @@ -110,7 +110,7 @@ class TestClusterTemplate(base.SaharaWithDbTestCase): 'worker' with 2 attached volumes 10GB size each """ - ng1, ng2 = self._make_node_groups('floating') + ng1, ng2 = self._make_node_groups('floating', 'vol_type') cluster = self._make_cluster('private_net', ng1, ng2) heat_template = self._make_heat_template(cluster, ng1, ng2) self.override_config("use_neutron", True) diff --git a/sahara/utils/openstack/heat.py b/sahara/utils/openstack/heat.py index 4704aa52..b225ca68 100644 --- a/sahara/utils/openstack/heat.py +++ b/sahara/utils/openstack/heat.py @@ -244,7 +244,8 @@ class ClusterTemplate(object): for idx in range(0, ng.volumes_per_node): yield self._serialize_volume(inst_name, idx, ng.volumes_size, - ng.volumes_availability_zone) + ng.volumes_availability_zone, + ng.volume_type) def _serialize_port(self, port_name, fixed_net_id, security_groups): fields = {'port_name': port_name, @@ -272,14 +273,22 @@ class ClusterTemplate(object): return _load_template('nova-floating.heat', fields) + def _serialize_volume_type(self, volume_type): + property = '"volume_type" : %s' + if volume_type is None: + return property % 'null' + else: + return property % ('"%s"' % volume_type) + def _serialize_volume(self, inst_name, volume_idx, volumes_size, - volumes_availability_zone): + volumes_availability_zone, volume_type): fields = {'volume_name': _get_volume_name(inst_name, volume_idx), 'volumes_size': volumes_size, 'volume_attach_name': _get_volume_attach_name(inst_name, volume_idx), 'availability_zone': '', - 'instance_name': inst_name} + 'instance_name': inst_name, + 'volume_type': self._serialize_volume_type(volume_type)} if volumes_availability_zone: # Use json.dumps to escape volumes_availability_zone