Support Cinder availability zones

Add a new 'volumes_availability_zone' option for node groups that
enables creation of volumes in a specific Cinder AZ.

Change-Id: Ia17d1628c7d5d06f0924edc8f92a0d26fff143e0
Implements: blueprint support-cinder-availability-zones
Author: Adrien Vergé 2014-09-19 15:13:41 +02:00
parent ea4e38a198
commit 50cd1f5f9d
15 changed files with 134 additions and 9 deletions
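For illustration, a node group template payload using the new field might look like the following. This is a minimal sketch, not taken from the change itself; all values are placeholders, and 'volumes_availability_zone' must name an existing Cinder AZ or validation rejects the request:

    # Hypothetical node group template payload (illustrative values only).
    ng_template = {
        "name": "worker",
        "flavor_id": "42",
        "plugin_name": "vanilla",
        "hadoop_version": "1.2.1",
        "node_processes": ["datanode", "tasktracker"],
        "volumes_per_node": 2,
        "volumes_size": 10,
        "volumes_availability_zone": "nova",  # new: Cinder AZ for the volumes
    }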

View File

@@ -37,6 +37,7 @@ NODE_GROUP_DEFAULTS = {
     "node_configs": {},
     "volumes_per_node": 0,
     "volumes_size": 0,
+    "volumes_availability_zone": None,
     "volume_mount_prefix": "/volumes/disk",
     "floating_ip_pool": None,
     "security_groups": None,

View File

@@ -74,6 +74,8 @@ class NodeGroup(object):
         see the docs for details
     volumes_per_node
     volumes_size
+    volumes_availability_zone - name of Cinder availability zone
+                                where to spawn volumes
     volume_mount_prefix
     floating_ip_pool - Floating IP Pool name used to assign Floating IPs to
                        instances in this Node Group
@@ -174,6 +176,7 @@ class NodeGroupTemplate(object):
         see the docs for details
     volumes_per_node
     volumes_size
+    volumes_availability_zone
     volume_mount_prefix
     floating_ip_pool
     security_groups

View File

@@ -0,0 +1,46 @@
+# -*- coding: utf-8 -*-
+# Copyright 2014, Adrien Vergé <adrien.verge@numergy.com>
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""add volumes_availability_zone field to node groups
+
+Revision ID: 013
+Revises: 012
+Create Date: 2014-09-08 15:37:00.000000
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '013'
+down_revision = '012'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('node_group_templates',
+                  sa.Column('volumes_availability_zone',
+                            sa.String(length=255)))
+    op.add_column('node_groups', sa.Column('volumes_availability_zone',
+                                           sa.String(length=255)))
+    op.add_column('templates_relations', sa.Column('volumes_availability_zone',
+                                                   sa.String(length=255)))
+
+
+def downgrade():
+    op.drop_column('node_group_templates', 'volumes_availability_zone')
+    op.drop_column('node_groups', 'volumes_availability_zone')
+    op.drop_column('templates_relations', 'volumes_availability_zone')
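As a quick sanity check after applying the migration, the three new columns can be confirmed with SQLAlchemy's inspector. A standalone sketch; the engine URL is a placeholder, not Sahara configuration:

    # Hypothetical verification that the migration added the columns.
    import sqlalchemy as sa

    engine = sa.create_engine('sqlite:///sahara.db')  # placeholder URL
    inspector = sa.inspect(engine)
    for table in ('node_group_templates', 'node_groups', 'templates_relations'):
        columns = [col['name'] for col in inspector.get_columns(table)]
        assert 'volumes_availability_zone' in columns, table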

View File

@@ -99,6 +99,7 @@ class NodeGroup(mb.SaharaBase):
     node_configs = sa.Column(st.JsonDictType())
     volumes_per_node = sa.Column(sa.Integer)
     volumes_size = sa.Column(sa.Integer)
+    volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     count = sa.Column(sa.Integer, nullable=False)
     instances = relationship('Instance', cascade="all,delete",
@@ -194,6 +195,7 @@ class NodeGroupTemplate(mb.SaharaBase):
     node_configs = sa.Column(st.JsonDictType())
     volumes_per_node = sa.Column(sa.Integer, nullable=False)
     volumes_size = sa.Column(sa.Integer)
+    volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     floating_ip_pool = sa.Column(sa.String(36))
     security_groups = sa.Column(st.JsonListType())
@@ -218,6 +220,7 @@ class TemplatesRelation(mb.SaharaBase):
     node_configs = sa.Column(st.JsonDictType())
     volumes_per_node = sa.Column(sa.Integer)
     volumes_size = sa.Column(sa.Integer)
+    volumes_availability_zone = sa.Column(sa.String(255))
     volume_mount_prefix = sa.Column(sa.String(80))
     count = sa.Column(sa.Integer, nullable=False)
     cluster_template_id = sa.Column(sa.String(36),

View File

@@ -2,6 +2,7 @@
     "Type" : "OS::Cinder::Volume",
     "Properties" : {
         "name" : "%(volume_name)s",
+        %(availability_zone)s
        "size" : "%(volumes_size)s"
    }
},
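The placeholder covers the whole '"availability_zone": ...,' fragment (rather than just the value) so that it can render to an empty string when no AZ is set. A standalone sketch of both cases; the field names mirror the template, not Sahara internals:

    # How the %(availability_zone)s placeholder expands: to a full JSON
    # key/value fragment, or to nothing at all.
    template = ('{ "Type" : "OS::Cinder::Volume", "Properties" : { '
                '"name" : "%(volume_name)s", %(availability_zone)s '
                '"size" : "%(volumes_size)s" } }')
    with_az = template % {'volume_name': 'volume_test_1',
                          'availability_zone': '"availability_zone": "nova",',
                          'volumes_size': 10}
    without_az = template % {'volume_name': 'volume_test_1',
                             'availability_zone': '',
                             'volumes_size': 10}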

View File

@@ -25,6 +25,7 @@ from sahara.i18n import _
 import sahara.plugins.base as plugin_base
 import sahara.service.api as api
 from sahara.utils import general as g
+import sahara.utils.openstack.cinder as cinder
 import sahara.utils.openstack.heat as heat
 import sahara.utils.openstack.keystone as keystone
 import sahara.utils.openstack.nova as nova
@@ -130,6 +131,9 @@ def check_node_group_basic_fields(plugin_name, hadoop_version, ng,
     if ng.get('volumes_per_node'):
         check_cinder_exists()
+        if ng.get('volumes_availability_zone'):
+            check_volume_availability_zone_exist(
+                ng['volumes_availability_zone'])

     if ng.get('floating_ip_pool'):
         check_floatingip_pool_exists(ng['name'], ng['floating_ip_pool'])
@@ -209,7 +213,16 @@ def check_availability_zone_exist(az):
     az_list = nova.client().availability_zones.list(False)
     az_names = [a.zoneName for a in az_list]
     if az not in az_names:
-        raise ex.InvalidException(_("Availability zone '%s' not found") % az)
+        raise ex.InvalidException(_("Nova availability zone '%s' not found")
+                                  % az)
+
+
+def check_volume_availability_zone_exist(az):
+    az_list = cinder.client().availability_zones.list()
+    az_names = [a.zoneName for a in az_list]
+    if az not in az_names:
+        raise ex.InvalidException(_("Cinder availability zone '%s' not found")
+                                  % az)


 # Cluster creation related checks
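Both the Nova and the new Cinder check reduce to the same name-in-list test against the client's availability zone listing. A standalone sketch with stand-in zone objects (the real novaclient/cinderclient results expose .zoneName the same way):

    # Fake zone objects standing in for client results.
    class FakeAZ(object):
        def __init__(self, name):
            self.zoneName = name

    def az_exists(az, az_list):
        return az in [a.zoneName for a in az_list]

    assert az_exists('nova', [FakeAZ('nova')])
    assert not az_exists('nonexistent', [FakeAZ('nova')])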

View File

@@ -59,6 +59,9 @@ NODE_GROUP_TEMPLATE_SCHEMA = {
             "type": "integer",
             "minimum": 1,
         },
+        "volumes_availability_zone": {
+            "type": "string",
+        },
         "volume_mount_prefix": {
             "type": "string",
             "format": "posix_path",

View File

@@ -71,7 +71,8 @@ def _attach_volumes_to_node(node_group, instance):
     for idx in range(1, node_group.volumes_per_node + 1):
         display_name = "volume_" + instance.instance_name + "_" + str(idx)
         device = _create_attach_volume(
-            ctx, instance, size, display_name)
+            ctx, instance, size, display_name,
+            node_group.volumes_availability_zone)
         devices.append(device)
         LOG.debug("Attached volume %s to instance %s" %
                   (device, instance.instance_id))
@@ -81,11 +82,16 @@ def _attach_volumes_to_node(node_group, instance):
     _mount_volumes_to_node(instance, devices)


-def _create_attach_volume(ctx, instance, size, name=None):
+def _create_attach_volume(ctx, instance, size, name=None,
+                          availability_zone=None):
     if CONF.cinder_api_version == 1:
         kwargs = {'size': size, 'display_name': name}
     else:
         kwargs = {'size': size, 'name': name}

+    if availability_zone is not None:
+        kwargs['availability_zone'] = availability_zone
+
     volume = cinder.client().volumes.create(**kwargs)
     conductor.append_volume(ctx, instance, volume.id)
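The version split matters because Cinder API v1 names the field 'display_name' while v2 uses 'name'; the AZ is passed the same way under both. A standalone sketch of the kwargs construction, mirrored from _create_attach_volume above:

    # kwargs handed to cinderclient's volumes.create, per API version.
    def build_volume_kwargs(cinder_api_version, size, name=None,
                            availability_zone=None):
        if cinder_api_version == 1:
            kwargs = {'size': size, 'display_name': name}  # v1 field name
        else:
            kwargs = {'size': size, 'name': name}          # v2+ field name
        if availability_zone is not None:
            kwargs['availability_zone'] = availability_zone
        return kwargs

    assert build_volume_kwargs(2, 10, 'vol_1', 'nova') == {
        'size': 10, 'name': 'vol_1', 'availability_zone': 'nova'}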

View File

@@ -114,6 +114,7 @@ class ClusterTest(test_base.ConductorManagerTestCase):
             ng.pop("volume_mount_prefix")
             ng.pop("volumes_size")
             ng.pop("volumes_per_node")
+            ng.pop("volumes_availability_zone")
             ng.pop("floating_ip_pool")
             ng.pop("image_username")
             ng.pop("open_ports")

View File

@@ -189,6 +189,7 @@ class ClusterTemplates(test_base.ConductorManagerTestCase):
             ng.pop("volume_mount_prefix")
             ng.pop("volumes_size")
             ng.pop("volumes_per_node")
+            ng.pop("volumes_availability_zone")
             ng.pop("auto_security_group")

             self.assertEqual(SAMPLE_CLT["node_groups"],

View File

@@ -96,6 +96,7 @@ class TestAttachVolume(base.SaharaWithDbTestCase):
         ng = {'volumes_per_node': 2,
               'volumes_size': 2,
+              'volumes_availability_zone': None,
               'volume_mount_prefix': '/mnt/vols',
               'name': 'master',
               'instances': [instance1, instance2]}

View File

@@ -357,7 +357,9 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                         'security_groups': [],
                         'floating_ip_pool':
                             'd9a3bebc-f788-4b81-9a93-aa048022c1ca',
-                        'availability_zone': 'nova'
+                        'availability_zone': 'nova',
+                        'volumes_per_node': 1,
+                        'volumes_availability_zone': 'nova'
                     }
                 ]
             }
@@ -388,7 +390,36 @@ class TestClusterCreateValidation(u.ValidationTestCase):
                 ]
             },
             bad_req_i=(1, 'INVALID_REFERENCE',
-                       "Availability zone 'nonexistent' not found")
+                       "Nova availability zone 'nonexistent' not found")
+        )
+
+    def test_cluster_create_wrong_volumes_availability_zone(self):
+        self.override_config('use_neutron', True)
+        self._assert_create_object_validation(
+            data={
+                'name': 'testname',
+                'plugin_name': 'vanilla',
+                'hadoop_version': '1.2.1',
+                'user_keypair_id': 'test_keypair',
+                'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
+                'neutron_management_network': 'd9a3bebc-f788-4b81-'
+                                              '9a93-aa048022c1ca',
+                'node_groups': [
+                    {
+                        'name': 'nodegroup',
+                        'node_processes': ['namenode'],
+                        'flavor_id': '42',
+                        'count': 100,
+                        'security_groups': [],
+                        'floating_ip_pool':
+                            'd9a3bebc-f788-4b81-9a93-aa048022c1ca',
+                        'volumes_per_node': 1,
+                        'volumes_availability_zone': 'nonexistent'
+                    }
+                ]
+            },
+            bad_req_i=(1, 'INVALID_REFERENCE',
+                       "Cinder availability zone 'nonexistent' not found")
         )

View File

@@ -132,6 +132,7 @@ def start_patch(patch_templates=True):
     nova_p = mock.patch("sahara.utils.openstack.nova.client")
     keystone_p = mock.patch("sahara.utils.openstack.keystone._client")
     heat_p = mock.patch("sahara.utils.openstack.heat.client")
+    cinder_p = mock.patch("sahara.utils.openstack.cinder.client")
     get_image_p = mock.patch("sahara.service.api.get_image")

     get_image = get_image_p.start()
@@ -160,6 +161,9 @@ def start_patch(patch_templates=True):
     heat = heat_p.start()
     heat().stacks.list.side_effect = _get_heat_stack_list

+    cinder = cinder_p.start()
+    cinder().availability_zones.list.side_effect = _get_availability_zone_list
+
     class Service(object):
         @property
         def name(self):
@@ -228,7 +232,7 @@ def start_patch(patch_templates=True):
     get_ng_template.side_effect = _get_ng_template

     # request data to validate
     patchers = [get_clusters_p, get_cluster_p,
-                nova_p, keystone_p, get_image_p, heat_p]
+                nova_p, keystone_p, get_image_p, heat_p, cinder_p]
     if patch_templates:
         patchers.extend([get_ng_template_p, get_ng_templates_p,
                          get_cl_template_p, get_cl_templates_p])
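The cinder patch follows the same stubbing pattern as the existing nova and heat patches: the patched client factory returns a mock whose availability_zones.list yields canned zones. A generic standalone sketch of that wiring (using return_value for brevity, where the test utilities above use side_effect with a helper):

    # Stand-in for the patched client factory; FakeAZ is hypothetical.
    import mock  # on Python 3: from unittest import mock

    class FakeAZ(object):
        def __init__(self, name):
            self.zoneName = name

    cinder = mock.MagicMock()
    cinder().availability_zones.list.return_value = [FakeAZ('nova')]
    assert [a.zoneName for a in cinder().availability_zones.list()] == ['nova']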

View File

@@ -35,7 +35,8 @@ def make_ng_dict(name, flavor, processes, count, instances=None, **kwargs):
     dct = {'name': name, 'flavor_id': flavor, 'node_processes': processes,
            'count': count, 'instances': instances, 'node_configs': {},
            'security_groups': None, 'auto_security_group': False,
-           'availability_zone': None, 'open_ports': []}
+           'availability_zone': None, 'volumes_availability_zone': None,
+           'open_ports': []}
     dct.update(kwargs)
     return dct

View File

@@ -243,7 +243,8 @@ class ClusterTemplate(object):
             yield _load_template('instance.heat', fields)

             for idx in range(0, ng.volumes_per_node):
-                yield self._serialize_volume(inst_name, idx, ng.volumes_size)
+                yield self._serialize_volume(inst_name, idx, ng.volumes_size,
+                                             ng.volumes_availability_zone)

     def _serialize_port(self, port_name, fixed_net_id, security_groups):
         fields = {'port_name': port_name,
@@ -271,13 +272,22 @@ class ClusterTemplate(object):
         return _load_template('nova-floating.heat', fields)

-    def _serialize_volume(self, inst_name, volume_idx, volumes_size):
+    def _serialize_volume(self, inst_name, volume_idx, volumes_size,
+                          volumes_availability_zone):
         fields = {'volume_name': _get_volume_name(inst_name, volume_idx),
                   'volumes_size': volumes_size,
                   'volume_attach_name': _get_volume_attach_name(inst_name,
                                                                 volume_idx),
+                  'availability_zone': '',
                   'instance_name': inst_name}

+        if volumes_availability_zone:
+            # Use json.dumps to escape volumes_availability_zone
+            # (in case it contains quotes)
+            fields['availability_zone'] = (
+                '"availability_zone": %s,' %
+                json.dumps(volumes_availability_zone))
+
         return _load_template('volume.heat', fields)

     def _get_security_groups(self, node_group):
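A quick standalone illustration of why json.dumps is used for the substitution: it quotes the AZ name and escapes any embedded quotes, so the rendered Heat template stays valid JSON:

    # The escaping performed by _serialize_volume above.
    import json

    az = 'zone-with-"quotes"'
    fragment = '"availability_zone": %s,' % json.dumps(az)
    # fragment is: "availability_zone": "zone-with-\"quotes\"",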