Merge "[fedora atomic k8s] Add boot from volume support"

Authored by Zuul on 2019-09-20 11:21:33 +00:00, committed by Gerrit Code Review
commit 60d2485d83
14 changed files with 356 additions and 24 deletions

View File

@ -346,9 +346,15 @@ the table are linked to more details elsewhere in the user guide.
+---------------------------------------+--------------------+---------------+
| `docker_volume_type`_ | see below | see below |
+---------------------------------------+--------------------+---------------+
| `boot_volume_size`_ | see below | see below |
+---------------------------------------+--------------------+---------------+
| `boot_volume_type`_ | see below | see below |
+---------------------------------------+--------------------+---------------+
| `etcd_volume_size`_ | etcd storage | 0 |
| | volume size | |
+---------------------------------------+--------------------+---------------+
| `etcd_volume_type`_ | see below | see below |
+---------------------------------------+--------------------+---------------+
| `container_infra_prefix`_ | see below | "" |
+---------------------------------------+--------------------+---------------+
| `availability_zone`_ | AZ for the cluster | "" |
@ -1114,10 +1120,26 @@ _`admission_control_list`
The default value corresponds to the one recommended in this doc
for our current Kubernetes version.
_`boot_volume_size`
This label overrides the default_boot_volume_size of instances, which is
useful if your flavors are boot-from-volume only. The default value is 0,
meaning that cluster instances will not boot from volume.
_`boot_volume_type`
This label overrides the default_boot_volume_type of instances, which is
useful if your flavors are boot-from-volume only. The default value is '',
meaning that Magnum will randomly select a Cinder volume type from all
available options.
_`etcd_volume_size`
This label sets the size of a volume holding the etcd storage data.
The default value is 0, meaning the etcd data is not persisted (no volume).
_`etcd_volume_type`
This label overrides the default_etcd_volume_type of the volume holding the
etcd storage data. The default value is '', meaning that Magnum will randomly
select a Cinder volume type from all available options.
_`container_infra_prefix`
Prefix of all container images used in the cluster (kubernetes components,
coredns, kubernetes-dashboard, node-exporter). For example,

View File

@ -117,6 +117,7 @@ python-barbicanclient==4.5.2
python-dateutil==2.7.0
python-editor==1.0.3
python-glanceclient==2.8.0
python-cinderclient==2.2.0
python-heatclient==1.10.0
python-keystoneclient==3.8.0
python-mimeparse==1.6.0

magnum/common/cinder.py (new file, 46 lines)
View File

@ -0,0 +1,46 @@
# Copyright 2019 Catalyst Cloud Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log as logging
from magnum.common import clients
from magnum.common import exception
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def get_default_docker_volume_type(context):
return (CONF.cinder.default_docker_volume_type or
_get_random_volume_type(context))
def get_default_boot_volume_type(context):
return (CONF.cinder.default_boot_volume_type or
_get_random_volume_type(context))
def get_default_etcd_volume_type(context):
return (CONF.cinder.default_etcd_volume_type or
_get_random_volume_type(context))
def _get_random_volume_type(context):
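# Despite its name, this simply returns the first volume type reported
# by Cinder, used as an arbitrary default when none is configured.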
c_client = clients.OpenStackClients(context).cinder()
volume_types = c_client.volume_types.list()
if volume_types:
return volume_types[0].name
else:
raise exception.VolumeTypeNotFound()
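The helpers above encode a simple fallback: an explicitly configured [cinder] value wins, otherwise the first volume type reported by Cinder is used, and VolumeTypeNotFound is raised when no types exist at all. A minimal sketch of that behaviour, not part of the change, assuming a mocked cinderclient and an already registered [cinder] option group ('lvmdriver-1' and 'fast-ssd' are example type names, and context is a Magnum request context):

from unittest import mock

from oslo_config import cfg

from magnum.common import cinder

CONF = cfg.CONF


def demo_fallback(context):
    # No configured default: the first type listed by Cinder is returned.
    CONF.set_override('default_boot_volume_type', '', group='cinder')
    fake_type = mock.Mock()
    fake_type.name = 'lvmdriver-1'
    with mock.patch('magnum.common.clients.OpenStackClients') as osc:
        fake_cinder = osc.return_value.cinder.return_value
        fake_cinder.volume_types.list.return_value = [fake_type]
        assert cinder.get_default_boot_volume_type(context) == 'lvmdriver-1'

    # Configured default: Cinder is not consulted at all.
    CONF.set_override('default_boot_volume_type', 'fast-ssd', group='cinder')
    assert cinder.get_default_boot_volume_type(context) == 'fast-ssd'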

View File

@ -13,6 +13,7 @@
# under the License.
from barbicanclient.v1 import client as barbicanclient
from cinderclient.v2 import client as cinder_client
from glanceclient import client as glanceclient
from heatclient import client as heatclient
from keystoneauth1.exceptions import catalog
@ -41,6 +42,7 @@ class OpenStackClients(object):
self._nova = None
self._neutron = None
self._octavia = None
self._cinder = None
def url_for(self, **kwargs):
return self.keystone().session.get_endpoint(**kwargs)
@ -207,3 +209,24 @@ class OpenStackClients(object):
}
self._neutron = neutronclient.Client(**args)
return self._neutron
@exception.wrap_keystone_exception
def cinder(self):
if self._cinder:
return self._cinder
endpoint_type = self._get_client_option('cinder', 'endpoint_type')
region_name = self._get_client_option('cinder', 'region_name')
cinderclient_version = self._get_client_option('cinder', 'api_version')
endpoint = self.url_for(service_type='block-storage',
interface=endpoint_type,
region_name=region_name)
args = {
'cacert': self._get_client_option('cinder', 'ca_file'),
'insecure': self._get_client_option('cinder', 'insecure')
}
session = self.keystone().session
self._cinder = cinder_client.Client(cinderclient_version,
session=session,
endpoint_override=endpoint, **args)
return self._cinder
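A minimal usage sketch, not part of the change, showing how other Magnum code can obtain the new memoized client (context is a Magnum request context; repeated cinder() calls on the same OpenStackClients instance reuse the cached client):

from magnum.common import clients


def list_volume_type_names(context):
    osc = clients.OpenStackClients(context)
    c_client = osc.cinder()  # built once, cached on osc._cinder
    return [vt.name for vt in c_client.volume_types.list()]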

View File

@ -279,6 +279,12 @@ class OperationInProgress(Invalid):
"progress.")
class VolumeTypeNotFound(ResourceNotFound):
"""The code here changed to 400 according to the latest document."""
message = _("Valid volume type could not be found.")
code = 400
class ImageNotFound(ResourceNotFound):
"""The code here changed to 400 according to the latest document."""
message = _("Image %(image_id)s could not be found.")

View File

@ -28,12 +28,53 @@ cinder_opts = [
help=_('The default docker volume_type to use for volumes '
'used for docker storage. To use the cinder volumes '
'for docker storage, you need to select a default '
'value.'))]
'value. Otherwise, Magnum will select a random one '
'from the Cinder volume type list.')),
cfg.StrOpt('default_etcd_volume_type',
default='',
help=_('The default etcd volume_type to use for volumes '
'used for etcd storage. To use the cinder volumes '
'for etcd storage, you need to select a default '
'value. Otherwise, Magnum will select a random one '
'from the Cinder volume type list.')),
cfg.StrOpt('default_boot_volume_type',
default='',
help=_('The default boot volume_type to use for the boot '
'volumes of COE VMs. To use cinder boot volumes, you '
'need to select a default value. Otherwise, Magnum '
'will select a random one from the Cinder volume '
'type list.')),
cfg.IntOpt('default_boot_volume_size',
default=0,
help=_('The default volume size to use for the boot '
'volumes of COE VMs.'))
]
cinder_client_opts = [
cfg.StrOpt('region_name',
help=_('Region in Identity service catalog to use for '
'communication with the OpenStack service.'))]
'communication with the OpenStack service.')),
cfg.StrOpt('endpoint_type',
default='publicURL',
help=_('Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')),
cfg.StrOpt('api_version',
default='2',
help=_('Version of Cinder API to use in cinderclient.'))
]
common_security_opts = [
cfg.StrOpt('ca_file',
help=_('Optional CA cert file to use in SSL connections.')),
cfg.StrOpt('cert_file',
help=_('Optional PEM-formatted certificate chain file.')),
cfg.StrOpt('key_file',
help=_('Optional PEM-formatted file that contains the '
'private key.')),
cfg.BoolOpt('insecure',
default=False,
help=_("If set, then the server's certificate will not "
"be verified."))]
def register_opts(conf):
@ -41,6 +82,7 @@ def register_opts(conf):
conf.register_group(cinder_client_group)
conf.register_opts(cinder_opts, group=cinder_group)
conf.register_opts(cinder_client_opts, group=cinder_client_group)
conf.register_opts(common_security_opts, group=cinder_client_group)
def list_opts():
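For completeness, a hedged sketch of exercising the new [cinder] options programmatically, mirroring the unit-test changes later in this commit and assuming the option groups have already been registered; the type names and size are examples only:

from oslo_config import cfg

CONF = cfg.CONF


def configure_volume_defaults():
    # Equivalent to setting default_boot_volume_type,
    # default_boot_volume_size and default_etcd_volume_type in the
    # [cinder] section of magnum.conf.
    CONF.set_override('default_boot_volume_type', 'lvmdriver-1',
                      group='cinder')
    CONF.set_override('default_boot_volume_size', 20, group='cinder')
    CONF.set_override('default_etcd_volume_type', 'lvmdriver-1',
                      group='cinder')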

View File

@ -15,6 +15,7 @@ import json
from oslo_log import log as logging
from oslo_utils import strutils
from magnum.common import cinder
from magnum.common import exception
from magnum.common.x509 import operations as x509
from magnum.conductor.handlers.common import cert_manager
@ -91,11 +92,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
osc = self.get_osc(context)
extra_params['region_name'] = osc.cinder_region_name()
# set docker_volume_type
# use the configuration default if None provided
docker_volume_type = cluster.labels.get(
'docker_volume_type', CONF.cinder.default_docker_volume_type)
extra_params['docker_volume_type'] = docker_volume_type
self._set_volumes(context, cluster, extra_params)
extra_params['nodes_affinity_policy'] = \
CONF.cluster.nodes_affinity_policy
@ -169,6 +166,7 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
self._set_cert_manager_params(cluster, extra_params)
self._get_keystone_auth_default_policy(extra_params)
self._set_volumes(context, cluster, extra_params)
return super(K8sFedoraTemplateDefinition,
self).get_params(context, cluster_template, cluster,
@ -220,6 +218,28 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
.replace("$PROJECT_ID", extra_params["project_id"])
extra_params["keystone_auth_default_policy"] = washed_policy
def _set_volumes(self, context, cluster, extra_params):
# set docker_volume_type
docker_volume_type = cluster.labels.get(
'docker_volume_type',
cinder.get_default_docker_volume_type(context))
extra_params['docker_volume_type'] = docker_volume_type
# set etcd_volume_type
etcd_volume_type = cluster.labels.get(
'etcd_volume_type', cinder.get_default_etcd_volume_type(context))
extra_params['etcd_volume_type'] = etcd_volume_type
# set boot_volume_type
boot_volume_type = cluster.labels.get(
'boot_volume_type', cinder.get_default_boot_volume_type(context))
extra_params['boot_volume_type'] = boot_volume_type
# set boot_volume_size
boot_volume_size = cluster.labels.get(
'boot_volume_size', CONF.cinder.default_boot_volume_size)
extra_params['boot_volume_size'] = boot_volume_size
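The precedence implemented by _set_volumes is: an explicit cluster label wins, otherwise the [cinder] configuration default applies (which, for the volume types, may itself fall back to the first type reported by Cinder). A plain-Python illustration with example values, not part of the change:

# Labels passed at cluster creation time (example values only).
labels = {'boot_volume_size': '60'}
config_default_size = 0              # [cinder]/default_boot_volume_size
config_default_type = 'lvmdriver-1'  # via cinder.get_default_boot_volume_type

boot_volume_size = labels.get('boot_volume_size', config_default_size)
boot_volume_type = labels.get('boot_volume_type', config_default_type)
assert boot_volume_size == '60'           # label overrides the config default
assert boot_volume_type == 'lvmdriver-1'  # no label, config default applies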
def get_env_files(self, cluster_template, cluster):
env_files = []

View File

@ -1,4 +1,4 @@
heat_template_version: 2014-10-16
heat_template_version: 2015-04-30
description: >
This template will boot a Kubernetes cluster with one or more
@ -143,12 +143,27 @@ parameters:
constraints:
- allowed_values: ["true", "false"]
boot_volume_size:
type: number
description: >
size of the cinder boot volume for the nodes' root volume
boot_volume_type:
type: string
description: >
type of the cinder boot volume for the nodes' root volume
etcd_volume_size:
type: number
description: >
size of the cinder volume for etcd storage
default: 0
etcd_volume_type:
type: string
description: >
type of a cinder volume for etcd storage
docker_volume_size:
type: number
description: >
@ -873,7 +888,10 @@ resources:
master_flavor: {get_param: master_flavor}
external_network: {get_param: external_network}
kube_allow_priv: {get_param: kube_allow_priv}
boot_volume_size: {get_param: boot_volume_size}
boot_volume_type: {get_param: boot_volume_type}
etcd_volume_size: {get_param: etcd_volume_size}
etcd_volume_type: {get_param: etcd_volume_type}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
docker_storage_driver: {get_param: docker_storage_driver}
@ -1056,6 +1074,8 @@ resources:
etcd_server_ip: {get_attr: [etcd_address_lb_switch, private_ip]}
external_network: {get_param: external_network}
kube_allow_priv: {get_param: kube_allow_priv}
boot_volume_size: {get_param: boot_volume_size}
boot_volume_type: {get_param: boot_volume_type}
docker_volume_size: {get_param: docker_volume_size}
docker_volume_type: {get_param: docker_volume_type}
docker_storage_driver: {get_param: docker_storage_driver}

View File

@ -1,4 +1,4 @@
heat_template_version: 2014-10-16
heat_template_version: queens
description: >
This is a nested stack that defines a single Kubernetes master. This stack is
@ -39,11 +39,27 @@ parameters:
constraints:
- allowed_values: ["true", "false"]
boot_volume_size:
type: number
description: >
size of the cinder boot volume for the nodes' root volume
default: 0
boot_volume_type:
type: string
description: >
type of the cinder boot volume for the nodes' root volume
etcd_volume_size:
type: number
description: >
size of a cinder volume to allocate for etcd storage
etcd_volume_type:
type: string
description: >
type of a cinder volume to allocate for etcd storage
docker_volume_size:
type: number
description: >
@ -506,6 +522,15 @@ parameters:
default:
true
conditions:
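# Exactly one of kube-master / kube-master-bfv below is created:
# volume_based (boot_volume_size != 0) boots the master from a Cinder
# volume, image_based (boot_volume_size == 0) boots it from the image.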
image_based: {equals: [{get_param: boot_volume_size}, 0]}
volume_based:
not:
equals:
- get_param: boot_volume_size
- 0
resources:
######################################################################
#
@ -671,7 +696,7 @@ resources:
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: master_config}
server: {get_resource: kube-master}
server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
actions: ['CREATE']
######################################################################
@ -679,11 +704,20 @@ resources:
# a single kubernetes master.
#
kube_node_volume:
type: OS::Cinder::Volume
condition: volume_based
properties:
image: {get_param: server_image}
size: {get_param: boot_volume_size}
volume_type: {get_param: boot_volume_type}
# do NOT use "_" (underscore) in the Nova server name
# it creates a mismatch between the generated Nova name and its hostname
# which can lead to weird problems
kube-master:
type: OS::Nova::Server
condition: image_based
properties:
name: {get_param: name}
image: {get_param: server_image}
@ -697,6 +731,25 @@ resources:
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
kube-master-bfv:
type: OS::Nova::Server
condition: volume_based
properties:
name: {get_param: name}
flavor: {get_param: master_flavor}
key_name: {get_param: ssh_key_name}
user_data_format: SOFTWARE_CONFIG
software_config_transport: POLL_SERVER_HEAT
user_data: {get_resource: agent_config}
networks:
- port: {get_resource: kube_master_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
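# Boot from the Cinder volume created by kube_node_volume above; the
# volume is deleted together with the server (delete_on_termination).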
block_device_mapping_v2:
- boot_index: 0
volume_id: {get_resource: kube_node_volume}
delete_on_termination: true
kube_master_eth0:
type: OS::Neutron::Port
properties:
@ -741,11 +794,12 @@ resources:
type: Magnum::Optional::Etcd::Volume
properties:
size: {get_param: etcd_volume_size}
volume_type: {get_param: etcd_volume_type}
etcd_volume_attach:
type: Magnum::Optional::Etcd::VolumeAttachment
properties:
instance_uuid: {get_resource: kube-master}
instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
volume_id: {get_resource: etcd_volume}
mountpoint: /dev/vdc
@ -764,7 +818,7 @@ resources:
docker_volume_attach:
type: Magnum::Optional::Cinder::VolumeAttachment
properties:
instance_uuid: {get_resource: kube-master}
instance_uuid: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
volume_id: {get_resource: docker_volume}
mountpoint: /dev/vdb
@ -782,7 +836,7 @@ resources:
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: upgrade_kubernetes}
server: {get_resource: kube-master}
server: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
actions: ['UPDATE']
input_values:
kube_tag_input: {get_param: kube_tag}
@ -790,7 +844,7 @@ resources:
outputs:
OS::stack_id:
value: { get_resource: kube-master }
value: {if: ["volume_based", {get_resource: kube-master-bfv}, {get_resource: kube-master}]}
kube_master_ip:
value: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}

View File

@ -1,4 +1,4 @@
heat_template_version: 2014-10-16
heat_template_version: queens
description: >
This is a nested stack that defines a single Kubernetes minion. This stack is
@ -34,6 +34,16 @@ parameters:
constraints:
- allowed_values: ["true", "false"]
boot_volume_size:
type: number
description: >
size of the cinder boot volume
boot_volume_type:
type: string
description: >
type of the cinder boot volume
docker_volume_size:
type: number
description: >
@ -294,6 +304,15 @@ parameters:
default:
true
conditions:
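# Same selection as in kubemaster.yaml: kube-minion-bfv is created when
# boot_volume_size is non-zero, kube-minion otherwise.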
image_based: {equals: [{get_param: boot_volume_size}, 0]}
volume_based:
not:
equals:
- get_param: boot_volume_size
- 0
resources:
agent_config:
@ -399,7 +418,7 @@ resources:
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: node_config}
server: {get_resource: kube-minion}
server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
actions: ['CREATE']
######################################################################
@ -407,14 +426,38 @@ resources:
# a single kubernetes minion.
#
kube_node_volume:
type: OS::Cinder::Volume
condition: volume_based
properties:
image: {get_param: server_image}
size: {get_param: boot_volume_size}
volume_type: {get_param: boot_volume_type}
# do NOT use "_" (underscore) in the Nova server name
# it creates a mismatch between the generated Nova name and its hostname
# which can lead to weird problems
kube-minion:
condition: image_based
type: OS::Nova::Server
properties:
name: {get_param: name}
flavor: {get_param: minion_flavor}
image: {get_param: server_image}
key_name: {get_param: ssh_key_name}
user_data: {get_resource: agent_config}
user_data_format: SOFTWARE_CONFIG
software_config_transport: POLL_SERVER_HEAT
networks:
- port: {get_resource: kube_minion_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
kube-minion-bfv:
condition: volume_based
type: OS::Nova::Server
properties:
name: {get_param: name}
flavor: {get_param: minion_flavor}
key_name: {get_param: ssh_key_name}
user_data: {get_resource: agent_config}
@ -424,6 +467,10 @@ resources:
- port: {get_resource: kube_minion_eth0}
scheduler_hints: { group: { get_param: nodes_server_group_id }}
availability_zone: {get_param: availability_zone}
block_device_mapping_v2:
- boot_index: 0
volume_id: {get_resource: kube_node_volume}
delete_on_termination: true
kube_minion_eth0:
type: OS::Neutron::Port
@ -458,7 +505,7 @@ resources:
docker_volume_attach:
type: Magnum::Optional::Cinder::VolumeAttachment
properties:
instance_uuid: {get_resource: kube-minion}
instance_uuid: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
volume_id: {get_resource: docker_volume}
mountpoint: /dev/vdb
@ -476,7 +523,7 @@ resources:
properties:
signal_transport: HEAT_SIGNAL
config: {get_resource: upgrade_kubernetes}
server: {get_resource: kube-minion}
server: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
actions: ['UPDATE']
input_values:
kube_tag_input: {get_param: kube_tag}
@ -505,6 +552,6 @@ outputs:
######################################################################
OS::stack_id:
value: { get_resource: kube-minion }
value: {if: ["volume_based", {get_resource: kube-minion-bfv}, {get_resource: kube-minion}]}
description: >
This is the Nova server id of the node.

View File

@ -114,7 +114,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'kubescheduler_options': '--kubescheduler',
'kubeproxy_options': '--kubeproxy',
'influx_grafana_dashboard_enabled': 'True',
'service_cluster_ip_range': '10.254.0.0/16'},
'service_cluster_ip_range': '10.254.0.0/16',
'boot_volume_size': '60'},
'master_flavor_id': 'master_flavor_id',
'flavor_id': 'flavor_id',
'project_id': 'project_id',
@ -175,6 +176,10 @@ class TestClusterConductorWithK8s(base.TestCase):
self.mock_enable_octavia = octavia_patcher.start()
self.mock_enable_octavia.return_value = False
self.addCleanup(octavia_patcher.stop)
CONF.set_override('default_boot_volume_type',
'lvmdriver-1', group='cinder')
CONF.set_override('default_etcd_volume_type',
'lvmdriver-1', group='cinder')
@patch('requests.get')
@patch('magnum.objects.ClusterTemplate.get_by_uuid')
@ -262,6 +267,8 @@ class TestClusterConductorWithK8s(base.TestCase):
'kube_dashboard_enabled': 'True',
'influx_grafana_dashboard_enabled': 'True',
'docker_volume_type': 'lvmdriver-1',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1',
'etcd_volume_size': None,
'availability_zone': 'az_1',
'cert_manager_api': 'False',
@ -349,7 +356,10 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': 'image_id',
'minion_image': 'image_id',
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
}
if missing_attr is not None:
expected.pop(mapping[missing_attr], None)
@ -489,7 +499,10 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': 'image_id',
'minion_image': 'image_id',
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
}
self.assertEqual(expected, definition)
@ -611,7 +624,10 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': None,
'minion_image': None,
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
}
self.assertEqual(expected, definition)
self.assertEqual(
@ -1044,7 +1060,10 @@ class TestClusterConductorWithK8s(base.TestCase):
'max_node_count': 2,
'master_image': 'image_id',
'minion_image': 'image_id',
'keystone_auth_default_policy': self.keystone_auth_default_policy
'keystone_auth_default_policy': self.keystone_auth_default_policy,
'boot_volume_size': '60',
'boot_volume_type': 'lvmdriver-1',
'etcd_volume_type': 'lvmdriver-1'
}
self.assertEqual(expected, definition)
self.assertEqual(

View File

@ -479,6 +479,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'influx_grafana_dashboard_enabled')
docker_volume_type = mock_cluster.labels.get(
'docker_volume_type')
boot_volume_size = mock_cluster.labels.get(
'boot_volume_size')
etcd_volume_size = mock_cluster.labels.get(
'etcd_volume_size')
kube_tag = mock_cluster.labels.get('kube_tag')
@ -566,6 +568,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
npd_enabled = mock_cluster.labels.get('npd_enabled')
master_image = mock_cluster_template.image_id
minion_image = mock_cluster_template.image_id
boot_volume_size = mock_cluster.labels.get('boot_volume_size')
boot_volume_type = mock_cluster.labels.get('boot_volume_type')
etcd_volume_type = mock_cluster.labels.get('etcd_volume_type')
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
@ -585,6 +590,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'influx_grafana_dashboard_enabled':
influx_grafana_dashboard_enabled,
'docker_volume_type': docker_volume_type,
'boot_volume_size': boot_volume_size,
'etcd_volume_size': etcd_volume_size,
'kubelet_options': kubelet_options,
'kubeapi_options': kubeapi_options,
@ -647,6 +653,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'kube_version': kube_tag,
'master_kube_tag': kube_tag,
'minion_kube_tag': kube_tag,
'boot_volume_size': boot_volume_size,
'boot_volume_type': boot_volume_type,
'etcd_volume_type': etcd_volume_type
}}
mock_get_params.assert_called_once_with(mock_context,
mock_cluster_template,
@ -905,6 +914,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'influx_grafana_dashboard_enabled')
docker_volume_type = mock_cluster.labels.get(
'docker_volume_type')
boot_volume_size = mock_cluster.labels.get(
'boot_volume_size')
etcd_volume_size = mock_cluster.labels.get(
'etcd_volume_size')
kube_tag = mock_cluster.labels.get('kube_tag')
@ -992,6 +1003,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
npd_enabled = mock_cluster.labels.get('npd_enabled')
master_image = mock_cluster_template.image_id
minion_image = mock_cluster_template.image_id
boot_volume_size = mock_cluster.labels.get('boot_volume_size')
boot_volume_type = mock_cluster.labels.get('boot_volume_type')
etcd_volume_type = mock_cluster.labels.get('etcd_volume_type')
k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
@ -1012,6 +1026,7 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'influx_grafana_dashboard_enabled':
influx_grafana_dashboard_enabled,
'docker_volume_type': docker_volume_type,
'boot_volume_size': boot_volume_size,
'etcd_volume_size': etcd_volume_size,
'kubelet_options': kubelet_options,
'kubeapi_options': kubeapi_options,
@ -1075,6 +1090,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseK8sTemplateDefinitionTestCase):
'kube_version': kube_tag,
'master_kube_tag': kube_tag,
'minion_kube_tag': kube_tag,
'boot_volume_size': boot_volume_size,
'boot_volume_type': boot_volume_type,
'etcd_volume_type': etcd_volume_type
}}
mock_get_params.assert_called_once_with(mock_context,
mock_cluster_template,

View File

@ -0,0 +1,13 @@
---
features:
- |
Support boot from volume for all Kubernetes nodes (master and worker),
so that users can create a large root volume, which can be more
flexible than using docker_volume_size. Users can also specify the
volume type, so that high-performance storage (e.g. NVMe) can be
leveraged. A new label etcd_volume_type is added as well, so that
users can set the volume type for the etcd volume. If boot_volume_type
or etcd_volume_type are not passed as labels, Magnum will try to
read them from the config options default_boot_volume_type and
default_etcd_volume_type. A random volume type from Cinder will
be used if those options are not set.

View File

@ -42,6 +42,7 @@ pbr!=2.1.0,>=2.0.0 # Apache-2.0
pecan!=1.0.2,!=1.0.3,!=1.0.4,!=1.2,>=1.0.0 # BSD
pycadf!=2.0.0,>=1.1.0 # Apache-2.0
python-barbicanclient>=4.5.2 # Apache-2.0
python-cinderclient>=2.2.0 # Apache-2.0
python-glanceclient>=2.8.0 # Apache-2.0
python-heatclient>=1.10.0 # Apache-2.0
python-neutronclient>=6.7.0 # Apache-2.0