Allow docker_volume_size on cluster create
Add docker_volume_size as an option during cluster create. If it is not given, the default is taken from the cluster template. Add docker_volume_size to the Cluster object and use it instead of the value from the ClusterTemplate. Update both the magnum and magnum CLI documentation to reflect these changes.

Partial-Bug: #1697648
Implements: blueprint flatten-attributes
Change-Id: Ic6d77e6fdf5b068fa5319b238f4fd98b4d499be4
parent 7a5dc9c8e9
commit 79039bb419
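The behavioural core of the change is a precedence rule: a docker_volume_size supplied at cluster-create time wins, otherwise the cluster inherits the template's value. A minimal standalone sketch of that rule, using plain dicts in place of the real Magnum objects (the function name is illustrative, not part of the patch):

def resolve_docker_volume_size(cluster, cluster_template):
    # Prefer the value given on the cluster-create request; fall back to
    # the cluster template's default when none was supplied.
    if cluster.get('docker_volume_size') is None:
        return cluster_template.get('docker_volume_size')
    return cluster['docker_volume_size']

template = {'docker_volume_size': 20}
print(resolve_docker_volume_size({'docker_volume_size': 5}, template))  # 5
print(resolve_docker_volume_size({}, template))                         # 20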
@@ -153,7 +153,7 @@ class DcosCentosTemplateDefinition(template_def.BaseTemplateDefinition):
                                      extra_params=extra_params,
                                      **kwargs)

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         env_files = []

         template_def.add_priv_net_env_file(env_files, cluster_template)
@@ -44,7 +44,7 @@ class JeOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
                                      extra_params=extra_params,
                                      **kwargs)

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         env_files = []
         if cluster_template.master_lb_enabled:
             env_files.append(
@@ -212,6 +212,7 @@ They are loosely grouped as: mandatory, infrastructure, COE specific.
   the above size. If not specified, images will be stored in the compute
   instance's local disk. For the 'devicemapper' storage driver, the minimum
   value is 3GB. For the 'overlay' storage driver, the minimum value is 1GB.
+  This value can be overridden at cluster creation.

 --docker-storage-driver \<docker-storage-driver\>
   The name of a driver to manage the storage for the images and the
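The documentation hunk above describes the template-level attribute; with this patch the same attribute can also be supplied in the cluster-create request. A hypothetical request body for POST /v1/clusters with illustrative values (only docker_volume_size and the field names of the Cluster API object shown later in this diff are taken from the patch):

import json

cluster_create_body = {
    "name": "k8s-cluster",  # illustrative name
    "cluster_template_id": "4a96ac4b-2447-43f1-8ca6-9fd6f36d146d",
    "node_count": 2,
    "master_count": 1,
    "docker_volume_size": 3,  # overrides the template default
}
print(json.dumps(cluster_create_body, indent=4))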
@@ -89,6 +89,9 @@ class Bay(base.APIBase):
     master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
     """The number of master nodes for this bay. Default to 1 if not set"""

+    docker_volume_size = wtypes.IntegerType(minimum=1)
+    """The size in GB of the docker volume"""
+
     bay_create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60)
     """Timeout for creating the bay in minutes. Default to 60 if not set"""

@@ -171,6 +174,7 @@ class Bay(base.APIBase):
     def _convert_with_links(bay, url, expand=True):
         if not expand:
             bay.unset_fields_except(['uuid', 'name', 'baymodel_id',
+                                     'docker_volume_size',
                                      'node_count', 'status',
                                      'bay_create_timeout', 'master_count',
                                      'stack_id'])
@@ -194,6 +198,7 @@ class Bay(base.APIBase):
                      baymodel_id='4a96ac4b-2447-43f1-8ca6-9fd6f36d146d',
                      node_count=2,
                      master_count=1,
+                     docker_volume_size=1,
                      bay_create_timeout=15,
                      stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
                      status=fields.ClusterStatus.CREATE_COMPLETE,
@@ -415,6 +420,10 @@ class BaysController(base.Controller):
         baymodel = objects.ClusterTemplate.get_by_uuid(context,
                                                        bay.baymodel_id)

+        # If docker_volume_size is not present, use baymodel value
+        if bay.docker_volume_size is None:
+            bay.docker_volume_size = baymodel.docker_volume_size
+
         bay_dict = bay.as_dict()
         bay_dict['keypair'] = baymodel.keypair_id
         attr_validator.validate_os_resources(context, baymodel.as_dict(),
@@ -107,6 +107,9 @@ class Cluster(base.APIBase):
     master_count = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1)
     """The number of master nodes for this cluster. Default to 1 if not set"""

+    docker_volume_size = wtypes.IntegerType(minimum=1)
+    """The size in GB of the docker volume"""
+
     create_timeout = wsme.wsattr(wtypes.IntegerType(minimum=0), default=60)
     """Timeout for creating the cluster in minutes. Default to 60 if not set"""

@@ -158,7 +161,8 @@ class Cluster(base.APIBase):
     def _convert_with_links(cluster, url, expand=True):
         if not expand:
             cluster.unset_fields_except(['uuid', 'name', 'cluster_template_id',
-                                         'keypair', 'node_count', 'status',
+                                         'keypair', 'docker_volume_size',
+                                         'node_count', 'status',
                                          'create_timeout', 'master_count',
                                          'stack_id'])

@@ -183,6 +187,7 @@ class Cluster(base.APIBase):
                      keypair=None,
                      node_count=2,
                      master_count=1,
+                     docker_volume_size=1,
                      create_timeout=15,
                      stack_id='49dc23f5-ffc9-40c3-9d34-7be7f9e34d63',
                      status=fields.ClusterStatus.CREATE_COMPLETE,
@@ -375,6 +380,7 @@ class ClustersController(base.Controller):

     @expose.expose(ClusterID, body=Cluster, status_code=202)
     @validation.enforce_cluster_type_supported()
+    @validation.enforce_cluster_volume_storage_size()
     def post(self, cluster):
         """Create a new cluster.

@@ -393,6 +399,10 @@ class ClustersController(base.Controller):
         if cluster.keypair is None:
             cluster.keypair = cluster_template.keypair_id

+        # If docker_volume_size is not present, use cluster_template value
+        if cluster.docker_volume_size is None:
+            cluster.docker_volume_size = cluster_template.docker_volume_size
+
         cluster_dict = cluster.as_dict()

         attr_validator.validate_os_resources(context,
@@ -47,6 +47,19 @@ def enforce_cluster_type_supported():
     return wrapper


+def enforce_cluster_volume_storage_size():
+    @decorator.decorator
+    def wrapper(func, *args, **kwargs):
+        cluster = args[1]
+        cluster_template = objects.ClusterTemplate.get_by_uuid(
+            pecan.request.context, cluster.cluster_template_id)
+        _enforce_volume_storage_size(
+            cluster_template.as_dict(), cluster.as_dict())
+        return func(*args, **kwargs)
+
+    return wrapper
+
+
 def enforce_valid_project_id_on_create():
     @decorator.decorator
     def wrapper(func, *args, **kwargs):
@@ -133,7 +146,7 @@ def enforce_volume_storage_size_create():
     @decorator.decorator
     def wrapper(func, *args, **kwargs):
         cluster_template = args[1]
-        _enforce_volume_storage_size(cluster_template.as_dict())
+        _enforce_volume_storage_size(cluster_template.as_dict(), {})
         return func(*args, **kwargs)

     return wrapper
@@ -164,10 +177,11 @@ def _enforce_volume_driver_types(cluster_template):
         validator.validate_volume_driver(cluster_template['volume_driver'])


-def _enforce_volume_storage_size(cluster_template):
-    if not cluster_template.get('docker_volume_size'):
+def _enforce_volume_storage_size(cluster_template, cluster):
+    volume_size = cluster.get('docker_volume_size') \
+        or cluster_template.get('docker_volume_size')
+    if not volume_size:
         return
-    volume_size = cluster_template.get('docker_volume_size')
     storage_driver = cluster_template.get('docker_storage_driver')
     if storage_driver == 'devicemapper':
         if volume_size < 3:
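The hunk above cuts off after the devicemapper branch of the validator; taken together with the documented minimums (3GB for 'devicemapper', 1GB for 'overlay'), the size check amounts to the following sketch, which is the documented rule rather than the upstream code:

MINIMUM_DOCKER_VOLUME_SIZE_GB = {'devicemapper': 3, 'overlay': 1}

def check_docker_volume_size(volume_size, storage_driver):
    # Enforce the documented per-driver minimum, if one applies.
    minimum = MINIMUM_DOCKER_VOLUME_SIZE_GB.get(storage_driver)
    if minimum is not None and volume_size < minimum:
        raise ValueError("docker_volume_size must be at least %d GB for the "
                         "'%s' storage driver" % (minimum, storage_driver))

check_docker_volume_size(5, 'devicemapper')    # passes
# check_docker_volume_size(2, 'devicemapper')  # would raise ValueError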
@@ -0,0 +1,31 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""add docker_volume_size to cluster
+
+Revision ID: aa0cc27839af
+Revises: bc46ba6cf949
+Create Date: 2017-06-07 13:08:02.853105
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = 'aa0cc27839af'
+down_revision = 'bc46ba6cf949'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    pass
+    op.add_column('cluster', sa.Column('docker_volume_size',
+                                       sa.Integer(), nullable=True))
@@ -115,6 +115,7 @@ class Cluster(Base):
     name = Column(String(255))
     cluster_template_id = Column(String(255))
     keypair = Column(String(255))
+    docker_volume_size = Column(Integer())
     stack_id = Column(String(255))
     api_address = Column(String(255))
     node_addresses = Column(JSONEncodedList)
@@ -55,7 +55,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
     def __init__(self):
         super(K8sFedoraTemplateDefinition, self).__init__()
         self.add_parameter('docker_volume_size',
-                           cluster_template_attr='docker_volume_size')
+                           cluster_attr='docker_volume_size')
         self.add_parameter('docker_storage_driver',
                            cluster_template_attr='docker_storage_driver')
         self.add_output('kube_minions',
@@ -84,11 +84,11 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
                                      extra_params=extra_params,
                                      **kwargs)

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         env_files = []

         template_def.add_priv_net_env_file(env_files, cluster_template)
-        template_def.add_volume_env_file(env_files, cluster_template)
+        template_def.add_volume_env_file(env_files, cluster)
         template_def.add_lb_env_file(env_files, cluster_template)
         template_def.add_fip_env_file(env_files, cluster_template)

@@ -52,7 +52,7 @@ class SwarmFedoraTemplateDefinition(template_def.BaseTemplateDefinition):
         self.add_parameter('node_flavor',
                            cluster_template_attr='flavor_id')
         self.add_parameter('docker_volume_size',
-                           cluster_template_attr='docker_volume_size')
+                           cluster_attr='docker_volume_size')
         self.add_parameter('volume_driver',
                            cluster_template_attr='volume_driver')
         self.add_parameter('external_network',
@@ -122,11 +122,11 @@ class SwarmFedoraTemplateDefinition(template_def.BaseTemplateDefinition):
                                      extra_params=extra_params,
                                      **kwargs)

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         env_files = []

         template_def.add_priv_net_env_file(env_files, cluster_template)
-        template_def.add_volume_env_file(env_files, cluster_template)
+        template_def.add_volume_env_file(env_files, cluster)
         template_def.add_lb_env_file(env_files, cluster_template)

         return env_files
@@ -154,7 +154,7 @@ class TemplateDefinition(object):

         return template_params

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         """Gets stack environment files based upon ClusterTemplate attributes.

         Base implementation returns no files (empty list). Meant to be
@@ -199,7 +199,7 @@ class TemplateDefinition(object):
     def extract_definition(self, context, cluster_template, cluster, **kwargs):
         return (self.template_path,
                 self.get_params(context, cluster_template, cluster, **kwargs),
-                self.get_env_files(cluster_template))
+                self.get_env_files(cluster_template, cluster))


 class BaseTemplateDefinition(TemplateDefinition):
@@ -330,8 +330,8 @@ def add_lb_env_file(env_files, cluster_template):
         env_files.append(COMMON_ENV_PATH + 'no_master_lb.yaml')


-def add_volume_env_file(env_files, cluster_template):
-    if cluster_template.docker_volume_size is None:
+def add_volume_env_file(env_files, cluster):
+    if cluster.docker_volume_size is None:
         env_files.append(COMMON_ENV_PATH + 'no_volume.yaml')
     else:
         env_files.append(COMMON_ENV_PATH + 'with_volume.yaml')
@@ -30,7 +30,7 @@ class CoreOSK8sTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
         self.add_output('kube_masters',
                         cluster_attr='master_addresses')

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         env_files = []

         template_def.add_priv_net_env_file(env_files, cluster_template)
@@ -81,7 +81,7 @@ class UbuntuMesosTemplateDefinition(template_def.BaseTemplateDefinition):
                                      extra_params=extra_params,
                                      **kwargs)

-    def get_env_files(self, cluster_template):
+    def get_env_files(self, cluster_template, cluster):
         env_files = []

         template_def.add_priv_net_env_file(env_files, cluster_template)
@@ -42,8 +42,9 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
     # Version 1.11: Added 'RESUME_FAILED' in status field
     # Version 1.12: Added 'get_stats' method
     # Version 1.13: Added get_count_all method
+    # Version 1.14: Added 'docker_volume_size' field

-    VERSION = '1.13'
+    VERSION = '1.14'

     dbapi = dbapi.get_instance()

@@ -55,6 +56,7 @@ class Cluster(base.MagnumPersistentObject, base.MagnumObject,
         'user_id': fields.StringField(nullable=True),
         'cluster_template_id': fields.StringField(nullable=True),
         'keypair': fields.StringField(nullable=True),
+        'docker_volume_size': fields.IntegerField(nullable=True),
         'stack_id': fields.StringField(nullable=True),
         'status': m_fields.ClusterStatusField(nullable=True),
         'status_reason': fields.StringField(nullable=True),
@@ -781,6 +781,35 @@ class TestPost(api_base.FunctionalTest):
         self.assertTrue(self.mock_valid_os_res.called)
         self.assertEqual(409, response.status_int)

+    def test_create_cluster_with_docker_volume_size(self):
+        bdict = apiutils.cluster_post_data()
+        bdict['docker_volume_size'] = 3
+        response = self.post_json('/clusters', bdict)
+        self.assertEqual('application/json', response.content_type)
+        self.assertEqual(202, response.status_int)
+        cluster, timeout = self.mock_cluster_create.call_args
+        self.assertEqual(3, cluster[0].docker_volume_size)
+
+    def test_create_cluster_without_docker_volume_size(self):
+        bdict = apiutils.cluster_post_data()
+        response = self.post_json('/clusters', bdict)
+        self.assertEqual('application/json', response.content_type)
+        self.assertEqual(202, response.status_int)
+        cluster, timeout = self.mock_cluster_create.call_args
+        # Verify docker_volume_size from ClusterTemplate is used
+        self.assertEqual(20, cluster[0].docker_volume_size)
+
+    def test_create_cluster_with_invalid_docker_volume_size(self):
+        invalid_values = [(-1, None), ('notanint', None),
+                          (1, 'devicemapper'), (2, 'devicemapper')]
+        for value in invalid_values:
+            bdict = apiutils.cluster_post_data(docker_volume_size=value[0],
+                                               docker_storage_driver=value[1])
+            response = self.post_json('/clusters', bdict, expect_errors=True)
+            self.assertEqual('application/json', response.content_type)
+            self.assertEqual(400, response.status_int)
+            self.assertTrue(response.json['errors'])
+

 class TestDelete(api_base.FunctionalTest):
     def setUp(self):
@@ -74,6 +74,7 @@ class TestClusterConductorWithK8s(base.TestCase):
            'node_count': 1,
            'master_count': 1,
            'discovery_url': 'https://discovery.etcd.io/test',
+            'docker_volume_size': 20,
            'master_addresses': ['172.17.2.18'],
            'ca_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
            'magnum_cert_ref': 'http://barbican/v1/containers/xx-xx-xx-xx',
@@ -353,6 +354,7 @@ class TestClusterConductorWithK8s(base.TestCase):
            'auth_url': 'http://192.168.10.10:5000/v3',
            'cluster_uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
            'discovery_url': 'https://discovery.etcd.io/test',
+            'docker_volume_size': 20,
            'external_network': 'external_network_id',
            'flannel_backend': 'vxlan',
            'flannel_network_cidr': '10.101.0.0/16',
@@ -384,7 +386,7 @@ class TestClusterConductorWithK8s(base.TestCase):
         self.assertEqual(expected, definition)
         self.assertEqual(
             ['../../common/templates/environments/with_private_network.yaml',
-             '../../common/templates/environments/no_volume.yaml',
+             '../../common/templates/environments/with_volume.yaml',
              '../../common/templates/environments/no_master_lb.yaml',
              '../../common/templates/environments/disable_floating_ip.yaml',
              ],
@@ -62,6 +62,7 @@ class TestClusterConductorWithSwarm(base.TestCase):
            'uuid': '5d12f6fd-a196-4bf0-ae4c-1f639a523a52',
            'cluster_template_id': 'xx-xx-xx-xx',
            'keypair': 'keypair_id',
+            'docker_volume_size': 20,
            'name': 'cluster1',
            'stack_id': 'xx-xx-xx-xx',
            'api_address': '172.17.2.3',
@@ -295,12 +296,13 @@ class TestClusterConductorWithSwarm(base.TestCase):
            'swarm_version': 'fake-version',
            'swarm_strategy': u'spread',
            'rexray_preempt': 'False',
-            'docker_volume_type': 'lvmdriver-1'
+            'docker_volume_type': 'lvmdriver-1',
+            'docker_volume_size': 20,
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
             ['../../common/templates/environments/with_private_network.yaml',
-             '../../common/templates/environments/no_volume.yaml',
+             '../../common/templates/environments/with_volume.yaml',
              '../../common/templates/environments/no_master_lb.yaml'],
             env_files)

@@ -96,6 +96,7 @@ def get_test_cluster(**kw):
        'master_addresses': kw.get('master_addresses', ['172.17.2.18']),
        'created_at': kw.get('created_at'),
        'updated_at': kw.get('updated_at'),
+        'docker_volume_size': kw.get('docker_volume_size'),
     }

     # Only add Keystone trusts related attributes on demand since they may
@@ -38,6 +38,7 @@ class TestClusterObject(base.DbTestCase):
         self.fake_cluster_template = objects.ClusterTemplate(
             uuid=cluster_template_id)
         self.fake_cluster['keypair'] = 'keypair1'
+        self.fake_cluster['docker_volume_size'] = 3

     @mock.patch('magnum.objects.ClusterTemplate.get_by_uuid')
     def test_get_by_id(self, mock_cluster_template_get):
@@ -355,7 +355,7 @@ class TestObject(test_base.TestCase, _TestObject):
     # For more information on object version testing, read
     # http://docs.openstack.org/developer/magnum/objects.html
     object_data = {
-        'Cluster': '1.13-5da08d5f023eab4c5657c3fb6997e44c',
+        'Cluster': '1.14-281c582b16291c4f0666371e53975a5c',
         'ClusterTemplate': '1.17-74e4e6b1faca768714be809a828599c2',
         'Certificate': '1.1-1924dc077daa844f0f9076332ef96815',
         'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',