From 53d386dc013337cbb52ac45994fb70af8d64820f Mon Sep 17 00:00:00 2001 From: Ricardo Rocha Date: Wed, 13 Dec 2017 10:51:10 +0000 Subject: [PATCH] Add label availability_zone Add a new label 'availability_zone' allowing users to specify the AZ the nodes should be deployed in. Only one AZ can be passed for this first implementation. Change-Id: I9e55d7631191fffa6cc6b9bebbeb4faf2497815b Partially-Implements: blueprint magnum-availability-zones --- doc/source/user/index.rst | 15 ++++++++++- .../drivers/heat/k8s_fedora_template_def.py | 16 ++++++------ .../drivers/heat/swarm_mode_template_def.py | 2 +- .../templates/kubecluster.yaml | 8 ++++++ .../templates/kubemaster.yaml | 7 ++++++ .../templates/kubeminion.yaml | 7 ++++++ .../templates/swarmcluster.yaml | 9 +++++++ .../templates/swarmmaster.yaml | 7 ++++++ .../templates/swarmnode.yaml | 7 ++++++ .../handlers/test_k8s_cluster_conductor.py | 25 ++++++++++++------- .../handlers/test_swarm_cluster_conductor.py | 8 +++--- .../unit/drivers/test_template_definition.py | 12 +++++++-- .../availability_zone-2d73671f5ea065d8.yaml | 6 +++++ 13 files changed, 104 insertions(+), 25 deletions(-) create mode 100644 releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst index 1bda195fd1..d9ff00449d 100644 --- a/doc/source/user/index.rst +++ b/doc/source/user/index.rst @@ -341,6 +341,10 @@ the table are linked to more details elsewhere in the user guide. 
+---------------------------------------+--------------------+---------------+ | `container_infra_prefix`_ | see below | "" | +---------------------------------------+--------------------+---------------+ ++---------------------------------------+--------------------+---------------+ +| `availability_zone`_ | AZ for the cluster | "" | +| | nodes | | ++---------------------------------------+--------------------+---------------+ Cluster ------- @@ -2020,7 +2024,16 @@ _`flannel_backend` High Availability ================= -*To be filled in* + +Support for highly available clusters is a work in progress, the goal being to +enable clusters spanning multiple availability zones. + +As of today you can specify one single availability zone for your cluster. + +_`availability_zone` + The availability zone where the cluster nodes should be deployed. If not + specified, the default is None. + Scaling ======= diff --git a/magnum/drivers/heat/k8s_fedora_template_def.py b/magnum/drivers/heat/k8s_fedora_template_def.py index 3ba3433cf9..a25f9db997 100644 --- a/magnum/drivers/heat/k8s_fedora_template_def.py +++ b/magnum/drivers/heat/k8s_fedora_template_def.py @@ -78,18 +78,16 @@ class K8sFedoraTemplateDefinition(k8s_template_def.K8sTemplateDefinition): 'docker_volume_type', CONF.cinder.default_docker_volume_type) extra_params['docker_volume_type'] = docker_volume_type - kube_tag = cluster.labels.get('kube_tag') - if kube_tag: - extra_params['kube_tag'] = kube_tag - - container_infra_prefix = cluster.labels.get( - 'container_infra_prefix') - if container_infra_prefix: - extra_params['container_infra_prefix'] = container_infra_prefix - extra_params['nodes_affinity_policy'] = \ CONF.cluster.nodes_affinity_policy + label_list = ['kube_tag', 'container_infra_prefix', + 'availability_zone'] + for label in label_list: + label_value = cluster.labels.get(label) + if label_value: + extra_params[label] = label_value + return super(K8sFedoraTemplateDefinition, self).get_params(context,
cluster_template, cluster, extra_params=extra_params, diff --git a/magnum/drivers/heat/swarm_mode_template_def.py b/magnum/drivers/heat/swarm_mode_template_def.py index a8c256db50..87cdbc9dfc 100644 --- a/magnum/drivers/heat/swarm_mode_template_def.py +++ b/magnum/drivers/heat/swarm_mode_template_def.py @@ -102,7 +102,7 @@ class SwarmModeTemplateDefinition(template_def.BaseTemplateDefinition): osc = self.get_osc(context) extra_params['magnum_url'] = osc.magnum_url() - label_list = ['rexray_preempt'] + label_list = ['rexray_preempt', 'availability_zone'] extra_params['auth_url'] = context.auth_url extra_params['nodes_affinity_policy'] = \ diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml index 0cc80d038a..bbf22925f9 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubecluster.yaml @@ -364,6 +364,12 @@ parameters: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + resources: ###################################################################### @@ -557,6 +563,7 @@ resources: dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} + availability_zone: {get_param: availability_zone} ###################################################################### # @@ -627,6 +634,7 @@ resources: dns_cluster_domain: {get_param: dns_cluster_domain} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} + availability_zone: {get_param: availability_zone} outputs: diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml index 85ec60f5bb..aeb9f4f593 100644 --- 
a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster.yaml @@ -267,6 +267,12 @@ parameters: type: string description: ID of the server group for kubernetes cluster nodes. + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + resources: master_wait_handle: @@ -526,6 +532,7 @@ resources: networks: - port: {get_resource: kube_master_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} kube_master_eth0: type: OS::Neutron::Port diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml index 183d5430ab..df8439adeb 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion.yaml @@ -235,6 +235,12 @@ parameters: type: string description: ID of the server group for kubernetes cluster nodes. 
+ availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + resources: minion_wait_handle: @@ -438,6 +444,7 @@ resources: networks: - port: {get_resource: kube_minion_eth0} scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} kube_minion_eth0: type: OS::Neutron::Port diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml index 8a0aa63507..885afa6542 100644 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml +++ b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmcluster.yaml @@ -194,6 +194,12 @@ parameters: - allowed_values: ["affinity", "anti-affinity", "soft-affinity", "soft-anti-affinity"] + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + resources: ###################################################################### @@ -329,6 +335,7 @@ resources: verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} + availability_zone: {get_param: availability_zone} swarm_secondary_masters: type: "OS::Heat::ResourceGroup" @@ -373,6 +380,7 @@ resources: verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} + availability_zone: {get_param: availability_zone} swarm_nodes: type: "OS::Heat::ResourceGroup" @@ -417,6 +425,7 @@ resources: verify_ca: {get_param: verify_ca} openstack_ca: {get_param: openstack_ca} nodes_server_group_id: {get_resource: nodes_server_group} + availability_zone: {get_param: availability_zone} outputs: diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml index a9a62e4eb4..1c50252ba1 100644 --- 
a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml +++ b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmmaster.yaml @@ -144,6 +144,12 @@ parameters: type: string description: ID of the server group for kubernetes cluster nodes. + availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + resources: master_wait_handle: @@ -326,6 +332,7 @@ resources: - port: get_resource: swarm_master_eth0 scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} swarm_master_eth0: type: "OS::Neutron::Port" diff --git a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml index adfbdaa6b9..f33a5802d1 100644 --- a/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml +++ b/magnum/drivers/swarm_fedora_atomic_v2/templates/swarmnode.yaml @@ -137,6 +137,12 @@ parameters: type: string description: ID of the server group for kubernetes cluster nodes. 
+ availability_zone: + type: string + description: > + availability zone for master and nodes + default: "" + resources: node_wait_handle: @@ -298,6 +304,7 @@ resources: - port: get_resource: swarm_node_eth0 scheduler_hints: { group: { get_param: nodes_server_group_id }} + availability_zone: {get_param: availability_zone} swarm_node_eth0: type: "OS::Neutron::Port" diff --git a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py index 7b8195520c..9cafcd31da 100644 --- a/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_k8s_cluster_conductor.py @@ -57,7 +57,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', - 'etcd_volume_size': 0}, + 'etcd_volume_size': 0, + 'availability_zone': 'az_1'}, 'tls_disabled': False, 'server_type': 'vm', 'registry_enabled': False, @@ -95,7 +96,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'prometheus_monitoring': 'False', 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', - 'docker_volume_type': 'lvmdriver-1'}, + 'docker_volume_type': 'lvmdriver-1', + 'availability_zone': 'az_1'}, 'master_flavor_id': 'master_flavor_id', 'flavor_id': 'flavor_id', } @@ -174,7 +176,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'grafana_admin_passwd': 'fake_pwd', 'kube_dashboard_enabled': 'True', 'docker_volume_type': 'lvmdriver-1', - 'etcd_volume_size': None}, + 'etcd_volume_size': None, + 'availability_zone': 'az_1'}, 'http_proxy': 'http_proxy', 'https_proxy': 'https_proxy', 'no_proxy': 'no_proxy', @@ -229,7 +232,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', - "nodes_affinity_policy": "soft-anti-affinity" + "nodes_affinity_policy": "soft-anti-affinity", + 'availability_zone': 'az_1' } if 
missing_attr is not None: expected.pop(mapping[missing_attr], None) @@ -326,7 +330,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', - "nodes_affinity_policy": "soft-anti-affinity" + "nodes_affinity_policy": "soft-anti-affinity", + 'availability_zone': 'az_1' } self.assertEqual(expected, definition) @@ -410,7 +415,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'username': 'fake_user', 'verify_ca': True, 'openstack_ca': '', - "nodes_affinity_policy": "soft-anti-affinity" + "nodes_affinity_policy": "soft-anti-affinity", + 'availability_zone': 'az_1' } self.assertEqual(expected, definition) self.assertEqual( @@ -488,7 +494,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, - 'openstack_ca': '', + 'openstack_ca': '' } self.assertEqual(expected, definition) self.assertEqual( @@ -561,7 +567,7 @@ class TestClusterConductorWithK8s(base.TestCase): 'insecure_registry_url': '10.0.0.1:5000', 'kube_version': 'fake-version', 'verify_ca': True, - 'openstack_ca': '', + 'openstack_ca': '' } self.assertEqual(expected, definition) self.assertEqual( @@ -734,7 +740,8 @@ class TestClusterConductorWithK8s(base.TestCase): 'kube_version': 'fake-version', 'verify_ca': True, 'openstack_ca': '', - "nodes_affinity_policy": "soft-anti-affinity" + "nodes_affinity_policy": "soft-anti-affinity", + 'availability_zone': 'az_1' } self.assertEqual(expected, definition) self.assertEqual( diff --git a/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py b/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py index a0a51a0c66..0fecf15850 100644 --- a/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py +++ b/magnum/tests/unit/conductor/handlers/test_swarm_cluster_conductor.py @@ -53,7 +53,8 @@ class TestClusterConductorWithSwarm(base.TestCase): 'flannel_network_subnetlen': '26', 
'flannel_backend': 'vxlan', 'rexray_preempt': 'False', - 'swarm_strategy': 'spread'}, + 'swarm_strategy': 'spread', + 'availability_zone': 'az_1'}, 'master_lb_enabled': False, 'volume_driver': 'rexray' } @@ -81,7 +82,8 @@ class TestClusterConductorWithSwarm(base.TestCase): 'flannel_network_subnetlen': '26', 'flannel_backend': 'vxlan', 'rexray_preempt': 'False', - 'swarm_strategy': 'spread'}, + 'swarm_strategy': 'spread', + 'availability_zone': 'az_1'}, 'coe_version': 'fake-version' } @@ -266,7 +268,7 @@ class TestClusterConductorWithSwarm(base.TestCase): 'https_proxy', 'no_proxy', 'network_driver', 'master_flavor_id', 'docker_storage_driver', 'volume_driver', 'rexray_preempt', 'fixed_subnet', - 'docker_volume_type'] + 'docker_volume_type', 'availability_zone'] for key in not_required: self.cluster_template_dict[key] = None self.cluster_dict['discovery_url'] = 'https://discovery.etcd.io/test' diff --git a/magnum/tests/unit/drivers/test_template_definition.py b/magnum/tests/unit/drivers/test_template_definition.py index d4cf4f5d10..4a18c7ffa1 100644 --- a/magnum/tests/unit/drivers/test_template_definition.py +++ b/magnum/tests/unit/drivers/test_template_definition.py @@ -272,6 +272,8 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): kube_tag = mock_cluster.labels.get('kube_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') + availability_zone = mock_cluster.labels.get( + 'availability_zone') k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() @@ -297,7 +299,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): 'region_name': mock_osc.cinder_region_name.return_value, 'kube_tag': kube_tag, 'container_infra_prefix': container_infra_prefix, - 'nodes_affinity_policy': 'soft-anti-affinity'}} + 'nodes_affinity_policy': 'soft-anti-affinity', + 'availability_zone': availability_zone, + }} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, @@ -356,6 +360,8
@@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): kube_tag = mock_cluster.labels.get('kube_tag') container_infra_prefix = mock_cluster.labels.get( 'container_infra_prefix') + availability_zone = mock_cluster.labels.get( + 'availability_zone') k8s_def = k8sa_tdef.AtomicK8sTemplateDefinition() @@ -383,7 +389,9 @@ class AtomicK8sTemplateDefinitionTestCase(BaseTemplateDefinitionTestCase): 'kubernetes_port': 8080, 'kube_tag': kube_tag, 'container_infra_prefix': container_infra_prefix, - 'nodes_affinity_policy': 'soft-anti-affinity'}} + 'nodes_affinity_policy': 'soft-anti-affinity', + 'availability_zone': availability_zone, + }} mock_get_params.assert_called_once_with(mock_context, mock_cluster_template, mock_cluster, diff --git a/releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml b/releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml new file mode 100644 index 0000000000..48c7989222 --- /dev/null +++ b/releasenotes/notes/availability_zone-2d73671f5ea065d8.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Support passing an availability zone where all cluster nodes should be + deployed, via the new availability_zone label. Both swarm_fedora_atomic_v2 + and k8s_fedora_atomic_v1 support this new label.