Add label availability_zone
Add a new label 'availability_zone' allowing users to specify the AZ
the nodes should be deployed in. Only one AZ can be passed in this
first implementation.

Change-Id: I9e55d7631191fffa6cc6b9bebbeb4faf2497815b
Partially-Implements: blueprint magnum-availability-zones
commit 53d386dc01
parent c23d908480
@@ -341,6 +341,10 @@ the table are linked to more details elsewhere in the user guide.
 +---------------------------------------+--------------------+---------------+
 | `container_infra_prefix`_             | see below          | ""            |
 +---------------------------------------+--------------------+---------------+
+| `availability_zone`_                  | AZ for the cluster | ""            |
+|                                       | nodes              |               |
++---------------------------------------+--------------------+---------------+
 
 Cluster
 -------
@@ -2020,7 +2024,16 @@ _`flannel_backend`
 
 High Availability
 =================
-*To be filled in*
+
+Support for highly available clusters is a work in progress, the goal being to
+enable clusters spanning multiple availability zones.
+
+As of today, you can specify a single availability zone for your cluster.
+
+_`availability_zone`
+  The availability zone where the cluster nodes should be deployed. If not
+  specified, the default is None.
+
 
 Scaling
 =======
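Once this lands, a user can pin all cluster nodes to one zone by setting the label at creation time. A hypothetical example (it assumes an availability zone named nova and the OpenStack client's --labels syntax, and omits the other arguments a template requires):

    openstack coe cluster template create k8s-one-az \
        --coe kubernetes \
        --image fedora-atomic-latest \
        --labels availability_zone=nova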
@@ -78,18 +78,16 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition):
             'docker_volume_type', CONF.cinder.default_docker_volume_type)
         extra_params['docker_volume_type'] = docker_volume_type
 
-        kube_tag = cluster.labels.get('kube_tag')
-        if kube_tag:
-            extra_params['kube_tag'] = kube_tag
-
-        container_infra_prefix = cluster.labels.get(
-            'container_infra_prefix')
-        if container_infra_prefix:
-            extra_params['container_infra_prefix'] = container_infra_prefix
-
         extra_params['nodes_affinity_policy'] = \
             CONF.cluster.nodes_affinity_policy
 
+        label_list = ['kube_tag', 'container_infra_prefix',
+                      'availability_zone']
+        for label in label_list:
+            label_value = cluster.labels.get(label)
+            if label_value:
+                extra_params[label] = label_value
+
         return super(K8sFedoraTemplateDefinition,
                      self).get_params(context, cluster_template, cluster,
                                       extra_params=extra_params,
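The driver change above replaces the per-label plumbing with a whitelist loop: each label in label_list is forwarded to Heat only when the user actually set it, so unset labels fall back to the template defaults. A minimal, self-contained sketch of that pattern; the plain dict stands in for Magnum's cluster.labels and all values are illustrative:

    def collect_label_params(labels, allowed):
        """Forward whitelisted labels as Heat parameters, skipping unset ones."""
        extra_params = {}
        for label in allowed:
            value = labels.get(label)
            if value:  # None and '' fall back to the Heat parameter default
                extra_params[label] = value
        return extra_params

    labels = {'kube_tag': 'v1.9.3', 'container_infra_prefix': '',
              'availability_zone': 'az_1'}
    print(collect_label_params(
        labels, ['kube_tag', 'container_infra_prefix', 'availability_zone']))
    # -> {'kube_tag': 'v1.9.3', 'availability_zone': 'az_1'}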
@@ -102,7 +102,7 @@ class SwarmModeTemplateDefinition(template_def.BaseTemplateDefinition):
         osc = self.get_osc(context)
         extra_params['magnum_url'] = osc.magnum_url()
 
-        label_list = ['rexray_preempt']
+        label_list = ['rexray_preempt', 'availability_zone']
 
         extra_params['auth_url'] = context.auth_url
         extra_params['nodes_affinity_policy'] = \
@@ -364,6 +364,12 @@ parameters:
       - allowed_values: ["affinity", "anti-affinity", "soft-affinity",
                          "soft-anti-affinity"]
 
+  availability_zone:
+    type: string
+    description: >
+      availability zone for master and nodes
+    default: ""
+
 resources:
 
   ######################################################################
@@ -557,6 +563,7 @@ resources:
           dns_cluster_domain: {get_param: dns_cluster_domain}
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
+          availability_zone: {get_param: availability_zone}
 
   ######################################################################
   #
@@ -627,6 +634,7 @@ resources:
           dns_cluster_domain: {get_param: dns_cluster_domain}
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
+          availability_zone: {get_param: availability_zone}
 
 outputs:
@@ -267,6 +267,12 @@ parameters:
     type: string
     description: ID of the server group for kubernetes cluster nodes.
 
+  availability_zone:
+    type: string
+    description: >
+      availability zone for master and nodes
+    default: ""
+
 resources:
 
   master_wait_handle:
@@ -526,6 +532,7 @@ resources:
       networks:
         - port: {get_resource: kube_master_eth0}
       scheduler_hints: { group: { get_param: nodes_server_group_id }}
+      availability_zone: {get_param: availability_zone}
 
   kube_master_eth0:
     type: OS::Neutron::Port
@@ -235,6 +235,12 @@ parameters:
     type: string
     description: ID of the server group for kubernetes cluster nodes.
 
+  availability_zone:
+    type: string
+    description: >
+      availability zone for master and nodes
+    default: ""
+
 resources:
 
   minion_wait_handle:
@@ -438,6 +444,7 @@ resources:
      networks:
        - port: {get_resource: kube_minion_eth0}
      scheduler_hints: { group: { get_param: nodes_server_group_id }}
+      availability_zone: {get_param: availability_zone}
 
   kube_minion_eth0:
     type: OS::Neutron::Port
@@ -194,6 +194,12 @@ parameters:
       - allowed_values: ["affinity", "anti-affinity", "soft-affinity",
                          "soft-anti-affinity"]
 
+  availability_zone:
+    type: string
+    description: >
+      availability zone for master and nodes
+    default: ""
+
 resources:
 
   ######################################################################
@@ -329,6 +335,7 @@ resources:
           verify_ca: {get_param: verify_ca}
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
+          availability_zone: {get_param: availability_zone}
 
   swarm_secondary_masters:
     type: "OS::Heat::ResourceGroup"
@@ -373,6 +380,7 @@ resources:
           verify_ca: {get_param: verify_ca}
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
+          availability_zone: {get_param: availability_zone}
 
   swarm_nodes:
     type: "OS::Heat::ResourceGroup"
@@ -417,6 +425,7 @@ resources:
           verify_ca: {get_param: verify_ca}
           openstack_ca: {get_param: openstack_ca}
           nodes_server_group_id: {get_resource: nodes_server_group}
+          availability_zone: {get_param: availability_zone}
 
 outputs:
@@ -144,6 +144,12 @@ parameters:
     type: string
     description: ID of the server group for kubernetes cluster nodes.
 
+  availability_zone:
+    type: string
+    description: >
+      availability zone for master and nodes
+    default: ""
+
 resources:
 
   master_wait_handle:
@@ -326,6 +332,7 @@ resources:
         - port:
             get_resource: swarm_master_eth0
       scheduler_hints: { group: { get_param: nodes_server_group_id }}
+      availability_zone: {get_param: availability_zone}
 
   swarm_master_eth0:
     type: "OS::Neutron::Port"
@@ -137,6 +137,12 @@ parameters:
     type: string
     description: ID of the server group for kubernetes cluster nodes.
 
+  availability_zone:
+    type: string
+    description: >
+      availability zone for master and nodes
+    default: ""
+
 resources:
 
   node_wait_handle:
@@ -298,6 +304,7 @@ resources:
         - port:
             get_resource: swarm_node_eth0
       scheduler_hints: { group: { get_param: nodes_server_group_id }}
+      availability_zone: {get_param: availability_zone}
 
   swarm_node_eth0:
     type: "OS::Neutron::Port"
@@ -57,7 +57,8 @@ class TestClusterConductorWithK8s(base.TestCase):
                        'grafana_admin_passwd': 'fake_pwd',
                        'kube_dashboard_enabled': 'True',
                        'docker_volume_type': 'lvmdriver-1',
-                       'etcd_volume_size': 0},
+                       'etcd_volume_size': 0,
+                       'availability_zone': 'az_1'},
             'tls_disabled': False,
             'server_type': 'vm',
             'registry_enabled': False,
@@ -95,7 +96,8 @@ class TestClusterConductorWithK8s(base.TestCase):
                        'prometheus_monitoring': 'False',
                        'grafana_admin_passwd': 'fake_pwd',
                        'kube_dashboard_enabled': 'True',
-                       'docker_volume_type': 'lvmdriver-1'},
+                       'docker_volume_type': 'lvmdriver-1',
+                       'availability_zone': 'az_1'},
             'master_flavor_id': 'master_flavor_id',
             'flavor_id': 'flavor_id',
         }
@@ -174,7 +176,8 @@ class TestClusterConductorWithK8s(base.TestCase):
                        'grafana_admin_passwd': 'fake_pwd',
                        'kube_dashboard_enabled': 'True',
                        'docker_volume_type': 'lvmdriver-1',
-                       'etcd_volume_size': None},
+                       'etcd_volume_size': None,
+                       'availability_zone': 'az_1'},
             'http_proxy': 'http_proxy',
             'https_proxy': 'https_proxy',
             'no_proxy': 'no_proxy',
@@ -229,7 +232,8 @@ class TestClusterConductorWithK8s(base.TestCase):
             'kube_version': 'fake-version',
             'verify_ca': True,
             'openstack_ca': '',
-            "nodes_affinity_policy": "soft-anti-affinity"
+            "nodes_affinity_policy": "soft-anti-affinity",
+            'availability_zone': 'az_1'
         }
         if missing_attr is not None:
             expected.pop(mapping[missing_attr], None)
@@ -326,7 +330,8 @@ class TestClusterConductorWithK8s(base.TestCase):
             'kube_version': 'fake-version',
             'verify_ca': True,
             'openstack_ca': '',
-            "nodes_affinity_policy": "soft-anti-affinity"
+            "nodes_affinity_policy": "soft-anti-affinity",
+            'availability_zone': 'az_1'
         }
 
         self.assertEqual(expected, definition)
@@ -410,7 +415,8 @@ class TestClusterConductorWithK8s(base.TestCase):
             'username': 'fake_user',
             'verify_ca': True,
             'openstack_ca': '',
-            "nodes_affinity_policy": "soft-anti-affinity"
+            "nodes_affinity_policy": "soft-anti-affinity",
+            'availability_zone': 'az_1'
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -488,7 +494,7 @@ class TestClusterConductorWithK8s(base.TestCase):
             'insecure_registry_url': '10.0.0.1:5000',
             'kube_version': 'fake-version',
             'verify_ca': True,
-            'openstack_ca': '',
+            'openstack_ca': ''
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -561,7 +567,7 @@ class TestClusterConductorWithK8s(base.TestCase):
             'insecure_registry_url': '10.0.0.1:5000',
             'kube_version': 'fake-version',
             'verify_ca': True,
-            'openstack_ca': '',
+            'openstack_ca': ''
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -734,7 +740,8 @@ class TestClusterConductorWithK8s(base.TestCase):
             'kube_version': 'fake-version',
             'verify_ca': True,
             'openstack_ca': '',
-            "nodes_affinity_policy": "soft-anti-affinity"
+            "nodes_affinity_policy": "soft-anti-affinity",
+            'availability_zone': 'az_1'
         }
         self.assertEqual(expected, definition)
         self.assertEqual(
@@ -53,7 +53,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
                        'flannel_network_subnetlen': '26',
                        'flannel_backend': 'vxlan',
                        'rexray_preempt': 'False',
-                       'swarm_strategy': 'spread'},
+                       'swarm_strategy': 'spread',
+                       'availability_zone': 'az_1'},
             'master_lb_enabled': False,
             'volume_driver': 'rexray'
         }
@@ -81,7 +82,8 @@ class TestClusterConductorWithSwarm(base.TestCase):
                        'flannel_network_subnetlen': '26',
                        'flannel_backend': 'vxlan',
                        'rexray_preempt': 'False',
-                       'swarm_strategy': 'spread'},
+                       'swarm_strategy': 'spread',
+                       'availability_zone': 'az_1'},
             'coe_version': 'fake-version'
         }
 
@@ -266,7 +268,7 @@ class TestClusterConductorWithSwarm(base.TestCase):
                         'https_proxy', 'no_proxy', 'network_driver',
                         'master_flavor_id', 'docker_storage_driver',
                         'volume_driver', 'rexray_preempt', 'fixed_subnet',
-                        'docker_volume_type']
+                        'docker_volume_type', 'availability_zone']
         for key in not_required:
             self.cluster_template_dict[key] = None
         self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test'
@@ -272,6 +272,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
         kube_tag = mock_cluster.labels.get('kube_tag')
         container_infra_prefix = mock_cluster.labels.get(
             'container_infra_prefix')
+        availability_zone = mock_cluster.labels.get(
+            'availability_zone')
 
         k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
 
@@ -297,7 +299,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
             'region_name': mock_osc.cinder_region_name.return_value,
             'kube_tag': kube_tag,
             'container_infra_prefix': container_infra_prefix,
-            'nodes_affinity_policy': 'soft-anti-affinity'}}
+            'nodes_affinity_policy': 'soft-anti-affinity',
+            'availability_zone': availability_zone,
+            }}
         mock_get_params.assert_called_once_with(mock_context,
                                                 mock_cluster_template,
                                                 mock_cluster,
@@ -356,6 +360,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
         kube_tag = mock_cluster.labels.get('kube_tag')
         container_infra_prefix = mock_cluster.labels.get(
             'container_infra_prefix')
+        availability_zone = mock_cluster.labels.get(
+            'availability_zone')
 
         k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition()
 
@@ -383,7 +389,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase):
             'kubernetes_port': 8080,
             'kube_tag': kube_tag,
             'container_infra_prefix': container_infra_prefix,
-            'nodes_affinity_policy': 'soft-anti-affinity'}}
+            'nodes_affinity_policy': 'soft-anti-affinity',
+            'availability_zone': availability_zone,
+            }}
         mock_get_params.assert_called_once_with(mock_context,
                                                 mock_cluster_template,
                                                 mock_cluster,
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Support passing an availability zone where all cluster nodes should be
+    deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2
+    and k8s_fedora_atomic_v1 support this new label.