From 9bf44b4a4cacc0e3e2a3f1dc1b4f6744ba69d8e1 Mon Sep 17 00:00:00 2001
From: "James E. Blair"
Date: Sun, 5 Feb 2023 08:33:08 -0800
Subject: [PATCH] Add scheduler, volumes, and labels to k8s/openshift

This adds support for specifying the scheduler name, volumes (and
volume mounts), and additional metadata labels to the Kubernetes and
OpenShift (and OpenShift pods) drivers.

This also extends the k8s and openshift test frameworks so that we
can exercise the new code paths (as well as some pre-existing similar
settings).  Tests and assertions are added for both a minimal
(mostly defaults) configuration and a configuration that uses all of
the optional settings.

Change-Id: I648e88a518c311b53c8ee26013a324a5013f3be3
---
 doc/source/kubernetes.rst                     | 36 +++++++
 doc/source/openshift-pods.rst                 | 28 ++++++
 doc/source/openshift.rst                      | 36 +++++++
 nodepool/driver/kubernetes/config.py          |  8 ++
 nodepool/driver/kubernetes/handler.py         |  2 +-
 nodepool/driver/kubernetes/provider.py        | 48 +++++++---
 nodepool/driver/openshift/config.py           |  8 ++
 nodepool/driver/openshift/handler.py          |  4 +-
 nodepool/driver/openshift/provider.py         | 40 +++++++-
 nodepool/driver/openshiftpods/config.py       |  4 +
 nodepool/driver/openshiftpods/handler.py      |  3 +-
 .../tests/fixtures/config_validate/good.yaml  | 20 ++++
 nodepool/tests/fixtures/kubernetes.yaml       | 17 ++++
 nodepool/tests/fixtures/openshift.yaml        | 21 ++++-
 nodepool/tests/unit/test_driver_kubernetes.py | 83 +++++++++++++++-
 nodepool/tests/unit/test_driver_openshift.py  | 94 ++++++++++++++++++-
 ...es-schedulers-labels-e04764f014b07424.yaml |  4 +
 17 files changed, 433 insertions(+), 23 deletions(-)
 create mode 100644 releasenotes/notes/pod-volumes-schedulers-labels-e04764f014b07424.yaml

diff --git a/doc/source/kubernetes.rst b/doc/source/kubernetes.rst
index d70854bb5..196e48693 100644
--- a/doc/source/kubernetes.rst
+++ b/doc/source/kubernetes.rst
@@ -188,6 +188,15 @@ Selecting the kubernetes driver adds the following options to the
 
             The ImagePullPolicy, can be IfNotPresent, Always or Never.
 
+         .. attr:: labels
+            :type: dict
+
+            A dictionary of additional values to be added to the
+            namespace or pod metadata.  The value of this field is
+            added to the `metadata.labels` field in Kubernetes.  Note
+            that this field contains arbitrary key/value pairs and is
+            unrelated to the concept of labels in Nodepool.
+
          .. attr:: python-path
             :type: str
             :default: auto
@@ -262,6 +271,15 @@ Selecting the kubernetes driver adds the following options to the
             A map of key-value pairs to ensure the Kubernetes scheduler
             places the Pod on a node with specific node labels.
 
+         .. attr:: scheduler-name
+            :type: str
+
+            Only used by the
+            :value:`providers.[kubernetes].pools.labels.type.pod`
+            label type.  Sets the `schedulerName` field on the
+            pod.  Normally left unset for the Kubernetes
+            default.
+
          .. attr:: privileged
             :type: bool
 
@@ -269,3 +287,21 @@ Selecting the kubernetes driver adds the following options to the
             :value:`providers.[kubernetes].pools.labels.type.pod`
             label type.  Sets the `securityContext.privileged` flag on
             the container.  Normally left unset for the Kubernetes default.
+
+         .. attr:: volumes
+            :type: list
+
+            Only used by the
+            :value:`providers.[kubernetes].pools.labels.type.pod`
+            label type.  Sets the `volumes` field on the pod.  If
+            supplied, this should be a list of Kubernetes Pod Volume
+            definitions.
+
+         .. attr:: volume-mounts
+            :type: list
+
+            Only used by the
+            :value:`providers.[kubernetes].pools.labels.type.pod`
+            label type.  Sets the `volumeMounts` field on the
+            container.  If supplied, this should be a list of
+            Kubernetes Container VolumeMount definitions.
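+
+            For example, to mount an inline CSI volume into the pod
+            (the volume values here are illustrative; they mirror the
+            settings exercised by the test fixtures in this change):
+
+            .. code-block:: yaml
+
+               labels:
+                 - name: pod-extra
+                   type: pod
+                   image: docker.io/fedora:28
+                   volumes:
+                     - name: my-csi-inline-vol
+                       csi:
+                         driver: inline.storage.kubernetes.io
+                   volume-mounts:
+                     - name: my-csi-inline-vol
+                       mountPath: /data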
diff --git a/doc/source/openshift-pods.rst b/doc/source/openshift-pods.rst
index 96ea78c5a..3e03eea4a 100644
--- a/doc/source/openshift-pods.rst
+++ b/doc/source/openshift-pods.rst
@@ -123,6 +123,15 @@ Selecting the openshift pods driver adds the following options to the
                image-pull-secrets:
                  - name: registry-secret
 
+         .. attr:: labels
+            :type: dict
+
+            A dictionary of additional values to be added to the
+            namespace or pod metadata.  The value of this field is
+            added to the `metadata.labels` field in OpenShift.  Note
+            that this field contains arbitrary key/value pairs and is
+            unrelated to the concept of labels in Nodepool.
+
          .. attr:: cpu
             :type: int
 
@@ -182,8 +191,27 @@ Selecting the openshift pods driver adds the following options to the
             A map of key-value pairs to ensure the OpenShift scheduler
             places the Pod on a node with specific node labels.
 
+         .. attr:: scheduler-name
+            :type: str
+
+            Sets the `schedulerName` field on the pod.  Normally
+            left unset for the OpenShift default.
+
          .. attr:: privileged
             :type: bool
 
             Sets the `securityContext.privileged` flag on the container.
             Normally left unset for the OpenShift default.
+
+         .. attr:: volumes
+            :type: list
+
+            Sets the `volumes` field on the pod.  If supplied, this
+            should be a list of OpenShift Pod Volume definitions.
+
+         .. attr:: volume-mounts
+            :type: list
+
+            Sets the `volumeMounts` field on the container.  If
+            supplied, this should be a list of OpenShift Container
+            VolumeMount definitions.
diff --git a/doc/source/openshift.rst b/doc/source/openshift.rst
index 34ea028bf..cf4f7ba8c 100644
--- a/doc/source/openshift.rst
+++ b/doc/source/openshift.rst
@@ -159,6 +159,15 @@ Selecting the openshift driver adds the following options to the
                image-pull-secrets:
                  - name: registry-secret
 
+         .. attr:: labels
+            :type: dict
+
+            A dictionary of additional values to be added to the
+            namespace or pod metadata.  The value of this field is
+            added to the `metadata.labels` field in OpenShift.  Note
+            that this field contains arbitrary key/value pairs and is
+            unrelated to the concept of labels in Nodepool.
+
          .. attr:: python-path
             :type: str
             :default: auto
@@ -226,6 +235,15 @@ Selecting the openshift driver adds the following options to the
             A map of key-value pairs to ensure the OpenShift scheduler
             places the Pod on a node with specific node labels.
 
+         .. attr:: scheduler-name
+            :type: str
+
+            Only used by the
+            :value:`providers.[openshift].pools.labels.type.pod`
+            label type.  Sets the `schedulerName` field on the
+            pod.  Normally left unset for the OpenShift
+            default.
+
          .. attr:: privileged
             :type: bool
 
@@ -233,3 +251,21 @@ Selecting the openshift driver adds the following options to the
             :value:`providers.[openshift].pools.labels.type.pod`
             label type.  Sets the `securityContext.privileged` flag on the
             container.  Normally left unset for the OpenShift default.
+
+         .. attr:: volumes
+            :type: list
+
+            Only used by the
+            :value:`providers.[openshift].pools.labels.type.pod`
+            label type.  Sets the `volumes` field on the pod.  If
+            supplied, this should be a list of OpenShift Pod Volume
+            definitions.
+
+         .. attr:: volume-mounts
+            :type: list
+
+            Only used by the
+            :value:`providers.[openshift].pools.labels.type.pod`
+            label type.  Sets the `volumeMounts` field on the
+            container.  If supplied, this should be a list of
+            OpenShift Container VolumeMount definitions.
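+
+            For example, to mount an inline CSI volume into the pod
+            (the volume values here are illustrative; they mirror the
+            settings exercised by the test fixtures in this change):
+
+            .. code-block:: yaml
+
+               labels:
+                 - name: pod-extra
+                   type: pod
+                   image: docker.io/fedora:28
+                   volumes:
+                     - name: my-csi-inline-vol
+                       csi:
+                         driver: inline.storage.kubernetes.io
+                   volume-mounts:
+                     - name: my-csi-inline-vol
+                       mountPath: /data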
diff --git a/nodepool/driver/kubernetes/config.py b/nodepool/driver/kubernetes/config.py
index 3cb71860c..11310065c 100644
--- a/nodepool/driver/kubernetes/config.py
+++ b/nodepool/driver/kubernetes/config.py
@@ -57,6 +57,10 @@ class KubernetesPool(ConfigPool):
             pl.env = label.get('env', [])
             pl.node_selector = label.get('node-selector')
             pl.privileged = label.get('privileged')
+            pl.scheduler_name = label.get('scheduler-name')
+            pl.volumes = label.get('volumes')
+            pl.volume_mounts = label.get('volume-mounts')
+            pl.labels = label.get('labels')
             pl.pool = self
             self.labels[pl.name] = pl
             full_config.labels[label['name']].pools.append(self)
@@ -104,6 +108,10 @@ class KubernetesProviderConfig(ProviderConfig):
             'env': [env_var],
             'node-selector': dict,
             'privileged': bool,
+            'scheduler-name': str,
+            'volumes': list,
+            'volume-mounts': list,
+            'labels': dict,
         }
 
         pool = ConfigPool.getCommonSchemaDict()
diff --git a/nodepool/driver/kubernetes/handler.py b/nodepool/driver/kubernetes/handler.py
index 50c4553ca..6286a6d11 100644
--- a/nodepool/driver/kubernetes/handler.py
+++ b/nodepool/driver/kubernetes/handler.py
@@ -32,7 +32,7 @@ class K8SLauncher(NodeLauncher):
         self.log.debug("Creating resource")
         if self.label.type == "namespace":
             resource = self.handler.manager.createNamespace(
-                self.node, self.handler.pool.name)
+                self.node, self.handler.pool.name, self.label)
         else:
             resource = self.handler.manager.createPod(
                 self.node, self.handler.pool.name, self.label)
diff --git a/nodepool/driver/kubernetes/provider.py b/nodepool/driver/kubernetes/provider.py
index cd7ab348b..34dae6ec8 100644
--- a/nodepool/driver/kubernetes/provider.py
+++ b/nodepool/driver/kubernetes/provider.py
@@ -155,23 +155,30 @@ class KubernetesProvider(Provider, QuotaSupport):
                 break
             time.sleep(1)
 
-    def createNamespace(self, node, pool, restricted_access=False):
+    def createNamespace(self, node, pool, label, restricted_access=False):
         name = node.id
         namespace = "%s-%s" % (pool, name)
         user = "zuul-worker"
         self.log.debug("%s: creating namespace" % namespace)
+
+        k8s_labels = {}
+        if label.labels:
+            k8s_labels.update(label.labels)
+        k8s_labels.update({
+            'nodepool_node_id': node.id,
+            'nodepool_provider_name': self.provider.name,
+            'nodepool_pool_name': pool,
+            'nodepool_node_label': label.name,
+        })
+
         # Create the namespace
         ns_body = {
             'apiVersion': 'v1',
             'kind': 'Namespace',
             'metadata': {
                 'name': namespace,
-                'labels': {
-                    'nodepool_node_id': node.id,
-                    'nodepool_provider_name': self.provider.name,
-                    'nodepool_pool_name': pool,
-                }
+                'labels': k8s_labels,
             }
         }
         proj = self.k8s_client.create_namespace(ns_body)
 
@@ -330,28 +337,43 @@ class KubernetesProvider(Provider, QuotaSupport):
         if label.node_selector:
             spec_body['nodeSelector'] = label.node_selector
 
+        if label.scheduler_name:
+            spec_body['schedulerName'] = label.scheduler_name
+
+        if label.volumes:
+            spec_body['volumes'] = label.volumes
+
+        if label.volume_mounts:
+            container_body['volumeMounts'] = label.volume_mounts
+
         if label.privileged is not None:
             container_body['securityContext'] = {
                 'privileged': label.privileged,
             }
 
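+        # Merge any user-supplied metadata labels with the nodepool
+        # bookkeeping labels; the nodepool_* values are applied last
+        # so they cannot be overridden by the label configuration.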
+        k8s_labels = {}
+        if label.labels:
+            k8s_labels.update(label.labels)
+        k8s_labels.update({
+            'nodepool_node_id': node.id,
+            'nodepool_provider_name': self.provider.name,
+            'nodepool_pool_name': pool,
+            'nodepool_node_label': label.name,
+        })
+
         pod_body = {
             'apiVersion': 'v1',
             'kind': 'Pod',
             'metadata': {
                 'name': label.name,
-                'labels': {
-                    'nodepool_node_id': node.id,
-                    'nodepool_provider_name': self.provider.name,
-                    'nodepool_pool_name': pool,
-                    'nodepool_node_label': label.name,
-                }
+                'labels': k8s_labels,
             },
             'spec': spec_body,
             'restartPolicy': 'Never',
         }
 
-        resource = self.createNamespace(node, pool, restricted_access=True)
+        resource = self.createNamespace(node, pool, label,
+                                        restricted_access=True)
         namespace = resource['namespace']
         self.k8s_client.create_namespaced_pod(namespace, pod_body)
diff --git a/nodepool/driver/openshift/config.py b/nodepool/driver/openshift/config.py
index 34b84c40e..1e7c1909a 100644
--- a/nodepool/driver/openshift/config.py
+++ b/nodepool/driver/openshift/config.py
@@ -53,6 +53,10 @@ class OpenshiftPool(ConfigPool):
             pl.env = label.get('env', [])
             pl.node_selector = label.get('node-selector')
             pl.privileged = label.get('privileged')
+            pl.scheduler_name = label.get('scheduler-name')
+            pl.volumes = label.get('volumes')
+            pl.volume_mounts = label.get('volume-mounts')
+            pl.labels = label.get('labels')
             pl.pool = self
             self.labels[pl.name] = pl
             full_config.labels[label['name']].pools.append(self)
@@ -101,6 +105,10 @@ class OpenshiftProviderConfig(ProviderConfig):
             'env': [env_var],
             'node-selector': dict,
             'privileged': bool,
+            'scheduler-name': str,
+            'volumes': list,
+            'volume-mounts': list,
+            'labels': dict,
         }
 
         pool = ConfigPool.getCommonSchemaDict()
diff --git a/nodepool/driver/openshift/handler.py b/nodepool/driver/openshift/handler.py
index 7131955f7..669467312 100644
--- a/nodepool/driver/openshift/handler.py
+++ b/nodepool/driver/openshift/handler.py
@@ -31,12 +31,14 @@ class OpenshiftLauncher(NodeLauncher):
     def _launchLabel(self):
         self.log.debug("Creating resource")
         project = "%s-%s" % (self.handler.pool.name, self.node.id)
-        self.node.external_id = self.handler.manager.createProject(project)
+        self.node.external_id = self.handler.manager.createProject(
+            self.node, self.handler.pool.name, project, self.label)
         self.zk.storeNode(self.node)
 
         resource = self.handler.manager.prepareProject(project)
         if self.label.type == "pod":
             self.handler.manager.createPod(
+                self.node, self.handler.pool.name,
                 project, self.label.name, self.label)
             self.handler.manager.waitForPod(project, self.label.name)
             resource['pod'] = self.label.name
diff --git a/nodepool/driver/openshift/provider.py b/nodepool/driver/openshift/provider.py
index 3a894d882..e584273d3 100644
--- a/nodepool/driver/openshift/provider.py
+++ b/nodepool/driver/openshift/provider.py
@@ -125,14 +125,26 @@ class OpenshiftProvider(Provider, QuotaSupport):
                 break
             time.sleep(1)
 
-    def createProject(self, project):
+    def createProject(self, node, pool, project, label):
        self.log.debug("%s: creating project" % project)
         # Create the project
+
+        k8s_labels = {}
+        if label.labels:
+            k8s_labels.update(label.labels)
+        k8s_labels.update({
+            'nodepool_node_id': node.id,
+            'nodepool_provider_name': self.provider.name,
+            'nodepool_pool_name': pool,
+            'nodepool_node_label': label.name,
+        })
+
         proj_body = {
             'apiVersion': 'project.openshift.io/v1',
             'kind': 'ProjectRequest',
             'metadata': {
                 'name': project,
+                'labels': k8s_labels,
             }
         }
         projects = self.os_client.resources.get(
@@ -211,7 +223,7 @@ class OpenshiftProvider(Provider, QuotaSupport):
         self.log.info("%s: project created" % project)
         return resource
 
-    def createPod(self, project, pod_name, label):
+    def createPod(self, node, pool, project, pod_name, label):
         self.log.debug("%s: creating pod in project %s" % (pod_name, project))
         container_body = {
             'name': label.name,
@@ -239,15 +251,37 @@ class OpenshiftProvider(Provider, QuotaSupport):
         if label.node_selector:
             spec_body['nodeSelector'] = label.node_selector
 
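+        # Pass the optional scheduler, volume, and volume mount
+        # settings through from the label configuration unchanged.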
+        if label.scheduler_name:
+            spec_body['schedulerName'] = label.scheduler_name
+
+        if label.volumes:
+            spec_body['volumes'] = label.volumes
+
+        if label.volume_mounts:
+            container_body['volumeMounts'] = label.volume_mounts
+
         if label.privileged is not None:
             container_body['securityContext'] = {
                 'privileged': label.privileged,
             }
 
+        k8s_labels = {}
+        if label.labels:
+            k8s_labels.update(label.labels)
+        k8s_labels.update({
+            'nodepool_node_id': node.id,
+            'nodepool_provider_name': self.provider.name,
+            'nodepool_pool_name': pool,
+            'nodepool_node_label': label.name,
+        })
+
         pod_body = {
             'apiVersion': 'v1',
             'kind': 'Pod',
-            'metadata': {'name': pod_name},
+            'metadata': {
+                'name': pod_name,
+                'labels': k8s_labels,
+            },
             'spec': spec_body,
             'restartPolicy': 'Never',
         }
diff --git a/nodepool/driver/openshiftpods/config.py b/nodepool/driver/openshiftpods/config.py
index 2dfe4e59c..253014b2a 100644
--- a/nodepool/driver/openshiftpods/config.py
+++ b/nodepool/driver/openshiftpods/config.py
@@ -61,6 +61,10 @@ class OpenshiftPodsProviderConfig(OpenshiftProviderConfig):
             'env': [env_var],
             'node-selector': dict,
             'privileged': bool,
+            'scheduler-name': str,
+            'volumes': list,
+            'volume-mounts': list,
+            'labels': dict,
         }
 
         pool = ConfigPool.getCommonSchemaDict()
diff --git a/nodepool/driver/openshiftpods/handler.py b/nodepool/driver/openshiftpods/handler.py
index 65c733a68..b2482b3bd 100644
--- a/nodepool/driver/openshiftpods/handler.py
+++ b/nodepool/driver/openshiftpods/handler.py
@@ -25,7 +25,8 @@ class OpenshiftPodLauncher(OpenshiftLauncher):
         self.log.debug("Creating resource")
         pod_name = "%s-%s" % (self.label.name, self.node.id)
         project = self.handler.pool.name
-        self.handler.manager.createPod(project, pod_name, self.label)
+        self.handler.manager.createPod(self.node, self.handler.pool.name,
+                                       project, pod_name, self.label)
         self.node.external_id = "%s-%s" % (project, pod_name)
         self.node.interface_ip = pod_name
         self.zk.storeNode(self.node)
diff --git a/nodepool/tests/fixtures/config_validate/good.yaml b/nodepool/tests/fixtures/config_validate/good.yaml
index daf7573ed..05a50a96e 100644
--- a/nodepool/tests/fixtures/config_validate/good.yaml
+++ b/nodepool/tests/fixtures/config_validate/good.yaml
@@ -158,6 +158,16 @@ providers:
             node-selector:
               storageType: ssd
             privileged: true
+            volumes:
+              - name: my-csi-inline-vol
+                csi:
+                  driver: inline.storage.kubernetes.io
+            volume-mounts:
+              - mountPath: "/data"
+                name: my-csi-inline-vol
+            scheduler-name: niftyScheduler
+            labels:
+              environment: qa
 
   - name: openshift
     driver: openshift
@@ -181,6 +191,16 @@ providers:
             node-selector:
              storageType: ssd
             privileged: true
+            volumes:
+              - name: my-csi-inline-vol
+                csi:
+                  driver: inline.storage.kubernetes.io
+            volume-mounts:
+              - mountPath: "/data"
+                name: my-csi-inline-vol
+            scheduler-name: niftyScheduler
+            labels:
+              environment: qa
 
   - name: ec2-us-east-2
     driver: aws
diff --git a/nodepool/tests/fixtures/kubernetes.yaml b/nodepool/tests/fixtures/kubernetes.yaml
index f747a5707..116410774 100644
--- a/nodepool/tests/fixtures/kubernetes.yaml
+++ b/nodepool/tests/fixtures/kubernetes.yaml
@@ -14,6 +14,7 @@ tenant-resource-limits:
 
 labels:
   - name: pod-fedora
+  - name: pod-extra
   - name: kubernetes-namespace
 
 providers:
@@ -31,3 +32,19 @@ providers:
           - name: pod-fedora
             type: pod
             image: docker.io/fedora:28
+          - name: pod-extra
+            type: pod
+            image: docker.io/fedora:28
+            labels:
+              environment: qa
+            privileged: true
+            node-selector:
+              storageType: ssd
+            scheduler-name: myscheduler
+            volumes:
+              - name: my-csi-inline-vol
+                csi:
+                  driver: inline.storage.kubernetes.io
+            volume-mounts:
+              - name: my-csi-inline-vol
+                mountPath: /data
diff --git a/nodepool/tests/fixtures/openshift.yaml b/nodepool/tests/fixtures/openshift.yaml
index 7cb6f7fda..e8b4f7ff1 100644
--- a/nodepool/tests/fixtures/openshift.yaml
+++ b/nodepool/tests/fixtures/openshift.yaml
@@ -14,6 +14,7 @@ tenant-resource-limits:
 
 labels:
   - name: pod-fedora
+  - name: pod-extra
   - name: openshift-project
   - name: pod-fedora-secret
 
@@ -32,10 +33,26 @@ providers:
           - name: pod-fedora
             type: pod
             image: docker.io/fedora:28
-            python-path: '/usr/bin/python3'
-            shell-type: csh
           - name: pod-fedora-secret
             type: pod
             image: docker.io/fedora:28
             image-pull-secrets:
               - name: registry-secret
+          - name: pod-extra
+            type: pod
+            image: docker.io/fedora:28
+            python-path: '/usr/bin/python3'
+            shell-type: csh
+            labels:
+              environment: qa
+            privileged: true
+            node-selector:
+              storageType: ssd
+            scheduler-name: myscheduler
+            volumes:
+              - name: my-csi-inline-vol
+                csi:
+                  driver: inline.storage.kubernetes.io
+            volume-mounts:
+              - name: my-csi-inline-vol
+                mountPath: /data
diff --git a/nodepool/tests/unit/test_driver_kubernetes.py b/nodepool/tests/unit/test_driver_kubernetes.py
index 9c08f63b5..81c5defe5 100644
--- a/nodepool/tests/unit/test_driver_kubernetes.py
+++ b/nodepool/tests/unit/test_driver_kubernetes.py
@@ -24,6 +24,7 @@ from nodepool.zk import zookeeper as zk
 class FakeCoreClient(object):
     def __init__(self):
         self.namespaces = []
+        self._pod_requests = []
 
     class FakeApi:
         class configuration:
@@ -73,7 +74,7 @@ class FakeCoreClient(object):
         return FakeSecret
 
     def create_namespaced_pod(self, ns, pod_body):
-        return
+        self._pod_requests.append((ns, pod_body))
 
     def read_namespaced_pod(self, name, ns):
         class FakePod:
@@ -109,6 +110,7 @@ class TestDriverKubernetes(tests.DBTestCase):
             fake_get_client))
 
     def test_kubernetes_machine(self):
+        # Test a pod with default values
         configfile = self.setup_config('kubernetes.yaml')
         pool = self.useNodepool(configfile, watermark_sleep=1)
         pool.start()
@@ -133,12 +135,91 @@ class TestDriverKubernetes(tests.DBTestCase):
                          {'key1': 'value1', 'key2': 'value2'})
         self.assertEqual(node.cloud, 'admin-cluster.local')
         self.assertEqual(node.host_id, 'k8s-default-pool-abcd-1234')
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-fedora',
+            'labels': {
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'kubespray',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-fedora'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'name': 'pod-fedora',
+                'image': 'docker.io/fedora:28',
+                'imagePullPolicy': 'IfNotPresent',
+                'command': ['/bin/sh', '-c'],
+                'args': ['while true; do sleep 30; done;'],
+                'env': []
+            }],
+        })
 
         node.state = zk.DELETING
         self.zk.storeNode(node)
 
         self.waitForNodeDeletion(node)
 
+    def test_kubernetes_machine_extra(self):
+        # Test a pod with lots of extra settings
+        configfile = self.setup_config('kubernetes.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        pool.start()
+        req = zk.NodeRequest()
+        req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
+        req.node_types.append('pod-extra')
+        self.zk.storeNodeRequest(req)
+
+        self.log.debug("Waiting for request %s", req.id)
+        req = self.waitForNodeRequest(req)
+        self.assertEqual(req.state, zk.FULFILLED)
+
+        self.assertNotEqual(req.nodes, [])
+        node = self.zk.getNode(req.nodes[0])
+        self.assertEqual(node.allocated_to, req.id)
+        self.assertEqual(node.state, zk.READY)
+        self.assertIsNotNone(node.launcher)
+        self.assertEqual(node.connection_type, 'kubectl')
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
+        self.assertEqual(node.attributes,
+                         {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertEqual(node.host_id, 'k8s-default-pool-abcd-1234')
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-extra',
+            'labels': {
+                'environment': 'qa',
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'kubespray',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-extra'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'args': ['while true; do sleep 30; done;'],
+                'command': ['/bin/sh', '-c'],
+                'env': [],
+                'image': 'docker.io/fedora:28',
+                'imagePullPolicy': 'IfNotPresent',
+                'name': 'pod-extra',
+                'securityContext': {'privileged': True},
+                'volumeMounts': [{
+                    'mountPath': '/data',
+                    'name': 'my-csi-inline-vol'
+                }],
+            }],
+            'nodeSelector': {'storageType': 'ssd'},
+            'schedulerName': 'myscheduler',
+            'volumes': [{
+                'csi': {'driver': 'inline.storage.kubernetes.io'},
+                'name': 'my-csi-inline-vol'
+            }],
+        })
+
     def test_kubernetes_native(self):
         configfile = self.setup_config('kubernetes.yaml')
         pool = self.useNodepool(configfile, watermark_sleep=1)
diff --git a/nodepool/tests/unit/test_driver_openshift.py b/nodepool/tests/unit/test_driver_openshift.py
index 011ec778f..97d5af4c8 100644
--- a/nodepool/tests/unit/test_driver_openshift.py
+++ b/nodepool/tests/unit/test_driver_openshift.py
@@ -92,6 +92,9 @@ class FakeOpenshiftClient(object):
 
 
 class FakeCoreClient(object):
+    def __init__(self):
+        self._pod_requests = []
+
     def create_namespaced_service_account(self, ns, sa_body):
         return
 
@@ -108,7 +111,7 @@ class FakeCoreClient(object):
         return FakeSecret
 
     def create_namespaced_pod(self, ns, pod_body):
-        return
+        self._pod_requests.append((ns, pod_body))
 
     def read_namespaced_pod(self, name, ns):
         class FakePod:
@@ -136,6 +139,7 @@ class TestDriverOpenshift(tests.DBTestCase):
             fake_get_client))
 
     def test_openshift_machine(self):
+        # Test a pod with default values
         configfile = self.setup_config('openshift.yaml')
         pool = self.useNodepool(configfile, watermark_sleep=1)
         pool.start()
@@ -149,6 +153,61 @@ class TestDriverOpenshift(tests.DBTestCase):
         req = self.waitForNodeRequest(req)
         self.assertEqual(req.state, zk.FULFILLED)
 
+        self.assertNotEqual(req.nodes, [])
+        node = self.zk.getNode(req.nodes[0])
+        self.assertEqual(node.allocated_to, req.id)
+        self.assertEqual(node.state, zk.READY)
+        self.assertIsNotNone(node.launcher)
+        self.assertEqual(node.connection_type, 'kubectl')
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
+        self.assertEqual(node.python_path, 'auto')
+        self.assertEqual(node.shell_type, None)
+        self.assertEqual(node.attributes,
+                         {'key1': 'value1', 'key2': 'value2'})
+        self.assertEqual(node.cloud, 'admin-cluster.local')
+        self.assertIsNone(node.host_id)
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-fedora',
+            'labels': {
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'openshift',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-fedora'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'name': 'pod-fedora',
+                'image': 'docker.io/fedora:28',
+                'imagePullPolicy': 'IfNotPresent',
+                'command': ['/bin/sh', '-c'],
+                'args': ['while true; do sleep 30; done;'],
+                'env': []
+            }],
+            'imagePullSecrets': [],
+        })
+
+        node.state = zk.DELETING
+        self.zk.storeNode(node)
+
+        self.waitForNodeDeletion(node)
+
+    def test_openshift_machine_extra(self):
+        # Test a pod with lots of extra settings
+        configfile = self.setup_config('openshift.yaml')
+        pool = self.useNodepool(configfile, watermark_sleep=1)
+        pool.start()
+        req = zk.NodeRequest()
+        req.state = zk.REQUESTED
+        req.tenant_name = 'tenant-1'
+        req.node_types.append('pod-extra')
+        self.zk.storeNodeRequest(req)
+
+        self.log.debug("Waiting for request %s", req.id)
+        req = self.waitForNodeRequest(req)
+        self.assertEqual(req.state, zk.FULFILLED)
+
         self.assertNotEqual(req.nodes, [])
         node = self.zk.getNode(req.nodes[0])
         self.assertEqual(node.allocated_to, req.id)
@@ -162,6 +221,39 @@ class TestDriverOpenshift(tests.DBTestCase):
                          {'key1': 'value1', 'key2': 'value2'})
         self.assertEqual(node.cloud, 'admin-cluster.local')
         self.assertIsNone(node.host_id)
+        ns, pod = self.fake_k8s_client._pod_requests[0]
+        self.assertEqual(pod['metadata'], {
+            'name': 'pod-extra',
+            'labels': {
+                'environment': 'qa',
+                'nodepool_node_id': '0000000000',
+                'nodepool_provider_name': 'openshift',
+                'nodepool_pool_name': 'main',
+                'nodepool_node_label': 'pod-extra'
+            },
+        })
+        self.assertEqual(pod['spec'], {
+            'containers': [{
+                'args': ['while true; do sleep 30; done;'],
+                'command': ['/bin/sh', '-c'],
+                'env': [],
+                'image': 'docker.io/fedora:28',
+                'imagePullPolicy': 'IfNotPresent',
+                'name': 'pod-extra',
+                'securityContext': {'privileged': True},
+                'volumeMounts': [{
+                    'mountPath': '/data',
+                    'name': 'my-csi-inline-vol'
+                }],
+            }],
+            'nodeSelector': {'storageType': 'ssd'},
+            'schedulerName': 'myscheduler',
+            'imagePullSecrets': [],
+            'volumes': [{
+                'csi': {'driver': 'inline.storage.kubernetes.io'},
+                'name': 'my-csi-inline-vol'
+            }],
+        })
 
         node.state = zk.DELETING
         self.zk.storeNode(node)
diff --git a/releasenotes/notes/pod-volumes-schedulers-labels-e04764f014b07424.yaml b/releasenotes/notes/pod-volumes-schedulers-labels-e04764f014b07424.yaml
new file mode 100644
index 000000000..5e76b2b3b
--- /dev/null
+++ b/releasenotes/notes/pod-volumes-schedulers-labels-e04764f014b07424.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added support for specifying the scheduler name, volumes and volume mounts, and additional metadata labels in the Kubernetes and OpenShift drivers.