
Merge "Implement an OpenShift resource provider"

tags/3.5.0
Zuul, 4 months ago
parent commit e459ffa0fd

.zuul.yaml  +16 -0

@@ -139,6 +139,21 @@
139 139
     required-projects:
140 140
       - openstack-infra/nodepool
141 141
 
142
+- job:
143
+    description: |
144
+      Test that nodepool works with openshift.
145
+    name: nodepool-functional-openshift
146
+    pre-run: playbooks/nodepool-functional-openshift/pre.yaml
147
+    run: playbooks/nodepool-functional-openshift/run.yaml
148
+    nodeset:
149
+      nodes:
150
+        - name: cluster
151
+          label: centos-7
152
+        - name: launcher
153
+          label: fedora-28
154
+    required-projects:
155
+      - openstack-infra/nodepool
156
+
142 157
 - project:
143 158
     check:
144 159
       jobs:
@@ -154,6 +169,7 @@
154 169
         - nodepool-functional-py35-src:
155 170
             voting: false
156 171
         - nodepool-functional-k8s
172
+        - nodepool-functional-openshift
157 173
         - pbrx-build-container-images:
158 174
             vars:
159 175
               pbrx_prefix: zuul

bindep.txt  +1 -0

@@ -13,3 +13,4 @@ musl-dev [compile test platform:apk]
13 13
 python3-dev [compile test platform:dpkg]
14 14
 python3-devel [compile test platform:rpm]
15 15
 zookeeperd [platform:dpkg test]
16
+zookeeper [platform:rpm test]

doc/source/configuration.rst  +136 -0

@@ -352,6 +352,13 @@ Options
352 352
          kubernetes driver, see the separate section
353 353
          :attr:`providers.[kubernetes]`
354 354
 
355
+      .. value:: openshift
356
+
357
+         For details on the extra options required and provided by the
358
+         openshift driver, see the separate section
359
+         :attr:`providers.[openshift]`
360
+
361
+
355 362
 OpenStack Driver
356 363
 ----------------
357 364
 
@@ -1134,3 +1141,132 @@ Selecting the kubernetes driver adds the following options to the
1134 1141
          Only used by the
1135 1142
          :value:`providers.[kubernetes].labels.type.pod` label type;
1136 1143
          specifies the image name used by the pod.
1144
+
1145
+
1146
+Openshift Driver
1147
+----------------
1148
+
1149
+Selecting the openshift driver adds the following options to the
1150
+:attr:`providers` section of the configuration.
1151
+
1152
+.. attr-overview::
1153
+   :prefix: providers.[openshift]
1154
+   :maxdepth: 3
1155
+
1156
+.. attr:: providers.[openshift]
1157
+   :type: list
1158
+
1159
+   An Openshift provider's resources are partitioned into groups called `pools`
1160
+   (see :attr:`providers.[openshift].pools` for details), and within a pool,
1161
+   the node types which are to be made available are listed
1162
+   (see :attr:`providers.[openshift].labels` for details).
1163
+
1164
+   .. note:: For documentation purposes the option names are prefixed
1165
+             ``providers.[openshift]`` to disambiguate from other
1166
+             drivers, but ``[openshift]`` is not required in the
1167
+             configuration (e.g. below
1168
+             ``providers.[openshift].pools`` refers to the ``pools``
1169
+             key in the ``providers`` section when the ``openshift``
1170
+             driver is selected).
1171
+
1172
+   Example:
1173
+
1174
+   .. code-block:: yaml
1175
+
1176
+     providers:
1177
+       - name: cluster
1178
+         driver: openshift
1179
+         context: context-name
1180
+         pools:
1181
+           - name: main
1182
+             labels:
1183
+               - name: openshift-project
1184
+                 type: project
1185
+               - name: openshift-pod
1186
+                 type: pod
1187
+                 image: docker.io/fedora:28
1188
+
1189
+   .. attr:: context
1190
+      :required:
1191
+
1192
+      Name of the context configured in ``kube/config``.
1193
+
1194
+      Before using the driver, Nodepool services need a ``kube/config`` file
1195
+      manually installed with a self-provisioner context (the service account
1196
+      needs to be able to create projects).
1197
+      Make sure the context is present in ``oc config get-contexts`` command
1198
+      output.
1199
+
1200
+   .. attr:: launch-retries
1201
+      :default: 3
1202
+
1203
+      The number of times to retry launching a node before considering
1204
+      the job failed.
1205
+
1206
+   .. attr:: max-projects
1207
+      :default: infinite
1208
+      :type: int
1209
+
1210
+      Maximum number of projects that can be used.
1211
+
1212
+   .. attr:: pools
1213
+      :type: list
1214
+
1215
+      A pool defines a group of resources from an Openshift provider.
1216
+
1217
+      .. attr:: name
1218
+         :required:
1219
+
1220
+         Project names are prefixed with the pool's name.
1221
+
1222
+   .. attr:: labels
1223
+      :type: list
1224
+
1225
+      Each entry in a pool's `labels` section indicates that the
1226
+      corresponding label is available for use in this pool.
1227
+
1228
+      Each entry is a dictionary with the following keys
1229
+
1230
+      .. attr:: name
1231
+         :required:
1232
+
1233
+         Identifier for this label; references an entry in the
1234
+         :attr:`labels` section.
1235
+
1236
+      .. attr:: type
1237
+
1238
+         The Openshift provider supports two types of labels:
1239
+
1240
+         .. value:: project
1241
+
1242
+            Project labels provide an empty project configured
1243
+            with a service account that can create pods, services,
1244
+            configmaps, etc.
1245
+
1246
+         .. value:: pod
1247
+
1248
+            Pod labels provide a dedicated project with a single pod
1249
+            created using the
1250
+            :attr:`providers.[openshift].labels.image` parameter. The
1251
+            project is configured with a service account that can exec
1252
+            into the pod and read its logs.
1253
+
1254
+      .. attr:: image
1255
+
1256
+         Only used by the
1257
+         :value:`providers.[openshift].labels.type.pod` label type;
1258
+         specifies the image name used by the pod.
1259
+
1260
+      .. attr:: cpu
1261
+         :type: int
1262
+
1263
+         Only used by the
1264
+         :value:`providers.[openshift].labels.type.pod` label type;
1265
+         specifies the amount of cpu to request for the pod.
1266
+
1267
+      .. attr:: memory
1268
+         :type: int
1269
+
1270
+         Only used by the
1271
+         :value:`providers.[openshift].labels.type.pod` label type;
1272
+         specifies the amount of memory in MB to request for the pod.
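
A note on the context option documented above: it has to name a context that already exists in the local kube config and that carries self-provisioner rights. As a quick sanity check, equivalent to scanning the output of oc config get-contexts, a short standalone script along the lines below can list the available context names. The helper name and the default ~/.kube/config path are assumptions for illustration; this is not part of the change.

    # List the context names that `oc login` wrote to the local kube config so
    # the provider's `context` setting can be verified before starting the
    # nodepool launcher.  Sketch only; assumes PyYAML is installed.
    import os
    import yaml

    def kube_context_names(path="~/.kube/config"):
        with open(os.path.expanduser(path)) as f:
            kubecfg = yaml.safe_load(f)
        return [ctx["name"] for ctx in kubecfg.get("contexts", [])]

    if __name__ == "__main__":
        print("\n".join(kube_context_names()))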

nodepool/driver/openshift/__init__.py  +37 -0

@@ -0,0 +1,37 @@
1
+# Copyright 2018 Red Hat
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#    http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
+# implied.
13
+#
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+
17
+from nodepool.driver import Driver
18
+from nodepool.driver.openshift.config import OpenshiftProviderConfig
19
+from nodepool.driver.openshift.provider import OpenshiftProvider
20
+from openshift import config
21
+
22
+
23
+class OpenshiftDriver(Driver):
24
+    def __init__(self):
25
+        super().__init__()
26
+
27
+    def reset(self):
28
+        try:
29
+            config.load_kube_config(persist_config=True)
30
+        except FileNotFoundError:
31
+            pass
32
+
33
+    def getProviderConfig(self, provider):
34
+        return OpenshiftProviderConfig(self, provider)
35
+
36
+    def getProvider(self, provider_config, use_taskmanager):
37
+        return OpenshiftProvider(provider_config, use_taskmanager)

nodepool/driver/openshift/config.py  +128 -0

@@ -0,0 +1,128 @@
1
+# Copyright 2018 Red Hat
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#    http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
+# implied.
13
+#
14
+# See the License for the specific language governing permissions and
15
+# limitations under the License.
16
+
17
+import math
18
+import voluptuous as v
19
+
20
+from nodepool.driver import ConfigPool
21
+from nodepool.driver import ConfigValue
22
+from nodepool.driver import ProviderConfig
23
+
24
+
25
+class OpenshiftLabel(ConfigValue):
26
+    def __eq__(self, other):
27
+        if isinstance(other, OpenshiftLabel):
28
+            return (other.name == self.name and
29
+                    other.type == self.type and
30
+                    other.image_pull == self.image_pull and
31
+                    other.image == self.image and
32
+                    other.cpu == self.cpu and
33
+                    other.memory == self.memory)
34
+        return False
35
+
36
+    def __repr__(self):
37
+        return "<OpenshiftLabel %s>" % self.name
38
+
39
+
40
+class OpenshiftPool(ConfigPool):
41
+    def __eq__(self, other):
42
+        if isinstance(other, OpenshiftPool):
43
+            return (super().__eq__(other) and
44
+                    other.name == self.name and
45
+                    other.labels == self.labels)
46
+        return False
47
+
48
+    def __repr__(self):
49
+        return "<OpenshiftPool %s>" % self.name
50
+
51
+    def load(self, pool_config, full_config):
52
+        super().load(pool_config)
53
+        self.name = pool_config['name']
54
+        self.labels = {}
55
+        for label in pool_config.get('labels', []):
56
+            pl = OpenshiftLabel()
57
+            pl.name = label['name']
58
+            pl.type = label['type']
59
+            pl.image = label.get('image')
60
+            pl.image_pull = label.get('image-pull', 'IfNotPresent')
61
+            pl.cpu = label.get('cpu')
62
+            pl.memory = label.get('memory')
63
+            pl.pool = self
64
+            self.labels[pl.name] = pl
65
+            full_config.labels[label['name']].pools.append(self)
66
+
67
+
68
+class OpenshiftProviderConfig(ProviderConfig):
69
+    def __init__(self, driver, provider):
70
+        self.driver_object = driver
71
+        self.__pools = {}
72
+        super().__init__(provider)
73
+
74
+    def __eq__(self, other):
75
+        if isinstance(other, OpenshiftProviderConfig):
76
+            return (super().__eq__(other) and
77
+                    other.context == self.context and
78
+                    other.pools == self.pools)
79
+        return False
80
+
81
+    @property
82
+    def pools(self):
83
+        return self.__pools
84
+
85
+    @property
86
+    def manage_images(self):
87
+        return False
88
+
89
+    def load(self, config):
90
+        self.launch_retries = int(self.provider.get('launch-retries', 3))
91
+        self.context = self.provider['context']
92
+        self.max_projects = self.provider.get('max-projects', math.inf)
93
+        for pool in self.provider.get('pools', []):
94
+            pp = OpenshiftPool()
95
+            pp.load(pool, config)
96
+            pp.provider = self
97
+            self.pools[pp.name] = pp
98
+
99
+    def getSchema(self):
100
+        openshift_label = {
101
+            v.Required('name'): str,
102
+            v.Required('type'): str,
103
+            'image': str,
104
+            'image-pull': str,
105
+            'cpu': int,
106
+            'memory': int,
107
+        }
108
+
109
+        pool = {
110
+            v.Required('name'): str,
111
+            v.Required('labels'): [openshift_label],
112
+        }
113
+
114
+        schema = ProviderConfig.getCommonSchemaDict()
115
+        schema.update({
116
+            v.Required('pools'): [pool],
117
+            v.Required('context'): str,
118
+            'launch-retries': int,
119
+            'max-projects': int,
120
+        })
121
+        return v.Schema(schema)
122
+
123
+    def getSupportedLabels(self, pool_name=None):
124
+        labels = set()
125
+        for pool in self.pools.values():
126
+            if not pool_name or (pool.name == pool_name):
127
+                labels.update(pool.labels.keys())
128
+        return labels
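
To make the schema above concrete, here is a small standalone sketch of how the label portion behaves when exercised directly with voluptuous. The example values are assumptions and the snippet is illustrative only; it is not part of the change.

    # Validate a label entry against the same keys getSchema() declares.
    # Unknown keys or a wrong value type raise a voluptuous Invalid error.
    import voluptuous as v

    label_schema = v.Schema({
        v.Required('name'): str,
        v.Required('type'): str,
        'image': str,
        'image-pull': str,
        'cpu': int,
        'memory': int,
    })

    label_schema({'name': 'openshift-pod', 'type': 'pod',
                  'image': 'docker.io/fedora:28', 'cpu': 2, 'memory': 512})
    # label_schema({'name': 'bad', 'type': 'pod', 'cpu': 'two'})  -> raises Invalid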

nodepool/driver/openshift/handler.py  +138 -0

@@ -0,0 +1,138 @@
1
+# Copyright 2018 Red Hat
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+import logging
16
+
17
+from kazoo import exceptions as kze
18
+
19
+from nodepool import exceptions
20
+from nodepool import zk
21
+from nodepool.driver.utils import NodeLauncher
22
+from nodepool.driver import NodeRequestHandler
23
+
24
+
25
+class OpenShiftLauncher(NodeLauncher):
26
+    def __init__(self, handler, node, provider_config, provider_label):
27
+        super().__init__(handler.zk, node, provider_config)
28
+        self.handler = handler
29
+        self.zk = handler.zk
30
+        self.label = provider_label
31
+        self._retries = provider_config.launch_retries
32
+
33
+    def _launchLabel(self):
34
+        self.log.debug("Creating resource")
35
+        project = "%s-%s" % (self.handler.pool.name, self.node.id)
36
+        self.node.external_id = self.handler.manager.createProject(project)
37
+        self.zk.storeNode(self.node)
38
+
39
+        resource = self.handler.manager.prepareProject(project)
40
+        if self.label.type == "pod":
41
+            self.handler.manager.createPod(
42
+                project, self.label)
43
+            resource['pod'] = self.label.name
44
+            self.node.connection_type = "kubectl"
45
+            self.node.interface_ip = self.label.name
46
+        else:
47
+            self.node.connection_type = "project"
48
+
49
+        self.node.state = zk.READY
50
+        # NOTE: resource access token may be encrypted here
51
+        self.node.connection_port = resource
52
+        self.zk.storeNode(self.node)
53
+        self.log.info("Resource %s is ready", project)
54
+
55
+    def launch(self):
56
+        attempts = 1
57
+        while attempts <= self._retries:
58
+            try:
59
+                self._launchLabel()
60
+                break
61
+            except kze.SessionExpiredError:
62
+                # If we lost our ZooKeeper session, we've lost our node lock
63
+                # so there's no need to continue.
64
+                raise
65
+            except Exception as e:
66
+                if attempts <= self._retries:
67
+                    self.log.exception(
68
+                        "Launch attempt %d/%d failed for node %s:",
69
+                        attempts, self._retries, self.node.id)
70
+                # If we created an instance, delete it.
71
+                if self.node.external_id:
72
+                    self.handler.manager.cleanupNode(self.node.external_id)
73
+                    self.handler.manager.waitForNodeCleanup(
74
+                        self.node.external_id)
75
+                    self.node.external_id = None
76
+                    self.node.interface_ip = None
77
+                    self.zk.storeNode(self.node)
78
+                if 'exceeded quota' in str(e).lower():
79
+                    self.log.info("%s: quota exceeded", self.node.id)
80
+                    raise exceptions.QuotaException("Quota exceeded")
81
+                if attempts == self._retries:
82
+                    raise
83
+                attempts += 1
84
+
85
+
86
+class OpenshiftNodeRequestHandler(NodeRequestHandler):
87
+    log = logging.getLogger("nodepool.driver.openshift."
88
+                            "OpenshiftNodeRequestHandler")
89
+
90
+    def __init__(self, pw, request):
91
+        super().__init__(pw, request)
92
+        self._threads = []
93
+
94
+    @property
95
+    def alive_thread_count(self):
96
+        count = 0
97
+        for t in self._threads:
98
+            if t.isAlive():
99
+                count += 1
100
+        return count
101
+
102
+    def imagesAvailable(self):
103
+        return True
104
+
105
+    def launchesComplete(self):
106
+        '''
107
+        Check if all launch requests have completed.
108
+
109
+        When all of the Node objects have reached a final state (READY or
110
+        FAILED), we'll know all threads have finished the launch process.
111
+        '''
112
+        if not self._threads:
113
+            return True
114
+
115
+        # Give the NodeLaunch threads time to finish.
116
+        if self.alive_thread_count:
117
+            return False
118
+
119
+        node_states = [node.state for node in self.nodeset]
120
+
121
+        # NOTE: It is very important that NodeLauncher always sets one of
122
+        # these states, no matter what.
123
+        if not all(s in (zk.READY, zk.FAILED, zk.ABORTED)
124
+                   for s in node_states):
125
+            return False
126
+
127
+        return True
128
+
129
+    def hasRemainingQuota(self, node_types):
130
+        if len(self.manager.listNodes()) + 1 > self.provider.max_projects:
131
+            return False
132
+        return True
133
+
134
+    def launch(self, node):
135
+        label = self.pool.labels[node.type[0]]
136
+        thd = OpenShiftLauncher(self, node, self.provider, label)
137
+        thd.start()
138
+        self._threads.append(thd)

nodepool/driver/openshift/provider.py  +237 -0

@@ -0,0 +1,237 @@
1
+# Copyright 2018 Red Hat
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
4
+# not use this file except in compliance with the License. You may obtain
5
+# a copy of the License at
6
+#
7
+#      http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12
+# License for the specific language governing permissions and limitations
13
+# under the License.
14
+
15
+import logging
16
+import urllib3
17
+import time
18
+
19
+from kubernetes.config import config_exception as kce
20
+from kubernetes import client as k8s_client
21
+from openshift import client as os_client
22
+from openshift import config
23
+
24
+from nodepool import exceptions
25
+from nodepool.driver import Provider
26
+from nodepool.driver.openshift import handler
27
+
28
+urllib3.disable_warnings()
29
+
30
+
31
+class OpenshiftProvider(Provider):
32
+    log = logging.getLogger("nodepool.driver.openshift.OpenshiftProvider")
33
+
34
+    def __init__(self, provider, *args):
35
+        self.provider = provider
36
+        self.ready = False
37
+        try:
38
+            self.os_client, self.k8s_client = self._get_client(
39
+                provider.context)
40
+        except kce.ConfigException:
41
+            self.log.exception(
42
+                "Couldn't load context %s from config", provider.context)
43
+            self.os_client = None
44
+            self.k8s_client = None
45
+        self.project_names = set()
46
+        for pool in provider.pools.values():
47
+            self.project_names.add(pool.name)
48
+
49
+    def _get_client(self, context):
50
+        conf = config.new_client_from_config(context=context)
51
+        return (
52
+            os_client.OapiApi(conf),
53
+            k8s_client.CoreV1Api(conf))
54
+
55
+    def start(self, zk_conn):
56
+        self.log.debug("Starting")
57
+        if self.ready or not self.os_client or not self.k8s_client:
58
+            return
59
+        self.ready = True
60
+
61
+    def stop(self):
62
+        self.log.debug("Stopping")
63
+
64
+    def listNodes(self):
65
+        servers = []
66
+
67
+        class FakeServer:
68
+            def __init__(self, project, provider, valid_names):
69
+                self.id = project.metadata.name
70
+                self.name = project.metadata.name
71
+                self.metadata = {}
72
+
73
+                if [True for valid_name in valid_names
74
+                    if project.metadata.name.startswith("%s-" % valid_name)]:
75
+                    node_id = project.metadata.name.split('-')[-1]
76
+                    try:
77
+                        # Make sure last component of name is an id
78
+                        int(node_id)
79
+                        self.metadata['nodepool_provider_name'] = provider
80
+                        self.metadata['nodepool_node_id'] = node_id
81
+                    except Exception:
82
+                        # Probably not a managed project, let's skip metadata
83
+                        pass
84
+
85
+            def get(self, name, default=None):
86
+                return getattr(self, name, default)
87
+
88
+        if self.ready:
89
+            for project in self.os_client.list_project().items:
90
+                servers.append(FakeServer(
91
+                    project, self.provider.name, self.project_names))
92
+        return servers
93
+
94
+    def labelReady(self, name):
95
+        # Labels are always ready
96
+        return True
97
+
98
+    def join(self):
99
+        pass
100
+
101
+    def cleanupLeakedResources(self):
102
+        pass
103
+
104
+    def cleanupNode(self, server_id):
105
+        if not self.ready:
106
+            return
107
+        self.log.debug("%s: removing project" % server_id)
108
+        try:
109
+            self.os_client.delete_project(server_id)
110
+            self.log.info("%s: project removed" % server_id)
111
+        except Exception:
112
+            # TODO: implement better exception handling
113
+            self.log.exception("Couldn't remove project %s" % server_id)
114
+
115
+    def waitForNodeCleanup(self, server_id):
116
+        for retry in range(300):
117
+            try:
118
+                self.os_client.read_project(server_id)
119
+            except Exception:
120
+                break
121
+            time.sleep(1)
122
+
123
+    def createProject(self, project):
124
+        self.log.debug("%s: creating project" % project)
125
+        # Create the project
126
+        proj_body = {
127
+            'apiVersion': 'v1',
128
+            'kind': 'ProjectRequest',
129
+            'metadata': {
130
+                'name': project,
131
+            }
132
+        }
133
+        self.os_client.create_project_request(proj_body)
134
+        return project
135
+
136
+    def prepareProject(self, project):
137
+        user = "zuul-worker"
138
+
139
+        # Create the service account
140
+        sa_body = {
141
+            'apiVersion': 'v1',
142
+            'kind': 'ServiceAccount',
143
+            'metadata': {'name': user}
144
+        }
145
+        self.k8s_client.create_namespaced_service_account(project, sa_body)
146
+
147
+        # Wait for the token to be created
148
+        for retry in range(30):
149
+            sa = self.k8s_client.read_namespaced_service_account(
150
+                user, project)
151
+            token = None
152
+            if sa.secrets:
153
+                for secret_obj in sa.secrets:
154
+                    secret = self.k8s_client.read_namespaced_secret(
155
+                        secret_obj.name, project)
156
+                    token = secret.metadata.annotations.get(
157
+                        'openshift.io/token-secret.value')
158
+                    if token:
159
+                        break
160
+            if token:
161
+                break
162
+            time.sleep(1)
163
+        if not token:
164
+            raise exceptions.LaunchNodepoolException(
165
+                "%s: couldn't find token for service account %s" %
166
+                (project, sa))
167
+
168
+        # Give service account admin access
169
+        role_body = {
170
+            'apiVersion': 'v1',
171
+            'kind': 'RoleBinding',
172
+            'metadata': {'name': 'admin-0'},
173
+            'roleRef': {'name': 'admin'},
174
+            'subjects': [{
175
+                'kind': 'ServiceAccount',
176
+                'name': user,
177
+                'namespace': project,
178
+            }],
179
+            'userNames': ['system:serviceaccount:%s:zuul-worker' % project]
180
+        }
181
+        try:
182
+            self.os_client.create_namespaced_role_binding(project, role_body)
183
+        except ValueError:
184
+            # https://github.com/ansible/ansible/issues/36939
185
+            pass
186
+
187
+        resource = {
188
+            'namespace': project,
189
+            'host': self.os_client.api_client.configuration.host,
190
+            'skiptls': not self.os_client.api_client.configuration.verify_ssl,
191
+            'token': token,
192
+            'user': user,
193
+        }
194
+        self.log.info("%s: project created" % project)
195
+        return resource
196
+
197
+    def createPod(self, project, label):
198
+        spec_body = {
199
+            'name': label.name,
200
+            'image': label.image,
201
+            'imagePullPolicy': label.image_pull,
202
+            'command': ["/bin/bash", "-c", "--"],
203
+            'args': ["while true; do sleep 30; done;"],
204
+            'workingDir': '/tmp',
205
+        }
206
+        if label.cpu or label.memory:
207
+            spec_body['resources'] = {}
208
+            for rtype in ('requests', 'limits'):
209
+                rbody = {}
210
+                if label.cpu:
211
+                    rbody['cpu'] = int(label.cpu)
212
+                if label.memory:
213
+                    rbody['memory'] = '%dMi' % int(label.memory)
214
+                spec_body['resources'][rtype] = rbody
215
+        pod_body = {
216
+            'apiVersion': 'v1',
217
+            'kind': 'Pod',
218
+            'metadata': {'name': label.name},
219
+            'spec': {
220
+                'containers': [spec_body],
221
+            },
222
+            'restartPolicy': 'Never',
223
+        }
224
+        self.k8s_client.create_namespaced_pod(project, pod_body)
225
+        for retry in range(300):
226
+            pod = self.k8s_client.read_namespaced_pod(label.name, project)
227
+            if pod.status.phase == "Running":
228
+                break
229
+            self.log.debug("%s: pod status is %s", project, pod.status.phase)
230
+            time.sleep(1)
231
+        if retry == 299:
232
+            raise exceptions.LaunchNodepoolException(
233
+                "%s: pod failed to initialize (%s)" % (
234
+                    project, pod.status.phase))
235
+
236
+    def getRequestHandler(self, poolworker, request):
237
+        return handler.OpenshiftNodeRequestHandler(poolworker, request)
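
The dictionary returned by prepareProject() above, which the handler stores on the node as connection_port, carries everything a client needs to reach the project: namespace, API host, token, user and the TLS-verification flag. As a rough illustration of how a consumer could use it, the helper below builds kubectl/oc style arguments from that dictionary; the function is an assumption for demonstration purposes and is not part of nodepool.

    # Turn the resource dict produced by OpenshiftProvider.prepareProject()
    # into command-line arguments understood by kubectl/oc.  Sketch only.
    def client_args(resource):
        args = ['--server=%s' % resource['host'],
                '--token=%s' % resource['token'],
                '--namespace=%s' % resource['namespace']]
        if resource.get('skiptls'):
            args.append('--insecure-skip-tls-verify=true')
        return args

    # e.g. subprocess.run(['kubectl'] + client_args(resource) + ['get', 'pods'])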

nodepool/tests/__init__.py  +2 -1

@@ -328,7 +328,7 @@ class DBTestCase(BaseTestCase):
328 328
         self.log = logging.getLogger("tests")
329 329
         self.setupZK()
330 330
 
331
-    def setup_config(self, filename, images_dir=None):
331
+    def setup_config(self, filename, images_dir=None, context_name=None):
332 332
         if images_dir is None:
333 333
             images_dir = fixtures.TempDir()
334 334
             self.useFixture(images_dir)
@@ -341,6 +341,7 @@ class DBTestCase(BaseTestCase):
341 341
             config = conf_fd.read().decode('utf8')
342 342
             data = config.format(images_dir=images_dir.path,
343 343
                                  build_log_dir=build_log_dir.path,
344
+                                 context_name=context_name,
344 345
                                  zookeeper_host=self.zookeeper_host,
345 346
                                  zookeeper_port=self.zookeeper_port,
346 347
                                  zookeeper_chroot=self.zookeeper_chroot)
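
The new context_name keyword reaches the test fixtures through plain str.format substitution, which is how the {context_name} placeholder in the functional/openshift/basic.yaml fixture added below gets filled in. A rough standalone illustration follows; the values are made up for the example, and keyword arguments that a given fixture does not reference are simply ignored by str.format.

    # Render a fixture template roughly the way setup_config() does.  Sketch only.
    template = open('nodepool/tests/fixtures/functional/openshift/basic.yaml').read()
    rendered = template.format(zookeeper_host='localhost',
                               zookeeper_port=2181,
                               zookeeper_chroot='/nodepool',
                               context_name='default/hostname:8443/developer',
                               images_dir='/tmp/images',
                               build_log_dir='/tmp/build-logs')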

nodepool/tests/fixtures/config_validate/good.yaml  +16 -0

@@ -21,6 +21,8 @@ labels:
21 21
   - name: trusty-static
22 22
   - name: kubernetes-namespace
23 23
   - name: pod-fedora
24
+  - name: openshift-project
25
+  - name: openshift-pod
24 26
 
25 27
 providers:
26 28
   - name: cloud1
@@ -116,6 +118,20 @@ providers:
116 118
             type: pod
117 119
             image: docker.io/fedora:28
118 120
 
121
+  - name: openshift
122
+    driver: openshift
123
+    context: "/hostname:8443/self-provisioner-service-account"
124
+    pools:
125
+      - name: main
126
+        labels:
127
+          - name: openshift-project
128
+            type: project
129
+          - name: openshift-pod
130
+            type: pod
131
+            image: docker.io/fedora:28
132
+            memory: 512
133
+            cpu: 2
134
+
119 135
 diskimages:
120 136
   - name: trusty
121 137
     formats:

nodepool/tests/fixtures/functional/openshift/basic.yaml  +23 -0

@@ -0,0 +1,23 @@
1
+zookeeper-servers:
2
+  - host: {zookeeper_host}
3
+    port: {zookeeper_port}
4
+    chroot: {zookeeper_chroot}
5
+
6
+labels:
7
+  - name: openshift-project
8
+    min-ready: 1
9
+  - name: openshift-pod
10
+    min-ready: 1
11
+
12
+providers:
13
+  - name: openshift
14
+    driver: openshift
15
+    context: {context_name}
16
+    pools:
17
+      - name: main
18
+        labels:
19
+          - name: openshift-project
20
+            type: project
21
+          - name: openshift-pod
22
+            type: pod
23
+            image: docker.io/fedora:28

nodepool/tests/fixtures/openshift.yaml  +21 -0

@@ -0,0 +1,21 @@
1
+zookeeper-servers:
2
+  - host: {zookeeper_host}
3
+    port: {zookeeper_port}
4
+    chroot: {zookeeper_chroot}
5
+
6
+labels:
7
+  - name: pod-fedora
8
+  - name: openshift-project
9
+
10
+providers:
11
+  - name: openshift
12
+    driver: openshift
13
+    context: admin-cluster.local
14
+    pools:
15
+      - name: main
16
+        labels:
17
+          - name: openshift-project
18
+            type: project
19
+          - name: pod-fedora
20
+            type: pod
21
+            image: docker.io/fedora:28

nodepool/tests/functional/openshift/__init__.py  +0 -0


nodepool/tests/functional/openshift/test_openshift.py  +50 -0

@@ -0,0 +1,50 @@
1
+# Copyright (C) 2018 Red Hat
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#    http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
+# implied.
13
+# See the License for the specific language governing permissions and
14
+# limitations under the License.
15
+
16
+import logging
17
+import os
18
+
19
+import yaml
20
+
21
+from nodepool import tests
22
+
23
+
24
+class TestOpenShift(tests.DBTestCase):
25
+    log = logging.getLogger("nodepool.TestOpenShift")
26
+
27
+    def setup_config(self, filename):
28
+        adjusted_filename = "functional/openshift/" + filename
29
+        # Openshift context names are not hardcoded;
30
+        # discover the name set up by oc login
31
+        kubecfg = yaml.safe_load(open(os.path.expanduser("~/.kube/config")))
32
+        try:
33
+            ctx_name = kubecfg['contexts'][0]['name']
34
+        except IndexError:
35
+            raise RuntimeError("Run oc login first")
36
+        self.log.debug("Using %s context name", ctx_name)
37
+        return super().setup_config(adjusted_filename, context_name=ctx_name)
38
+
39
+    def test_basic(self):
40
+        configfile = self.setup_config('basic.yaml')
41
+        pool = self.useNodepool(configfile, watermark_sleep=1)
42
+        pool.start()
43
+
44
+        nodes = self.waitForNodes("openshift-project", 1)
45
+        self.assertEqual(1, len(nodes))
46
+        self.assertEqual(nodes[0].connection_type, "project")
47
+
48
+        nodes = self.waitForNodes("openshift-pod", 1)
49
+        self.assertEqual(1, len(nodes))
50
+        self.assertEqual(nodes[0].connection_type, "kubectl")

nodepool/tests/unit/test_driver_openshift.py  +153 -0

@@ -0,0 +1,153 @@
1
+# Copyright (C) 2018 Red Hat
2
+#
3
+# Licensed under the Apache License, Version 2.0 (the "License");
4
+# you may not use this file except in compliance with the License.
5
+# You may obtain a copy of the License at
6
+#
7
+#    http://www.apache.org/licenses/LICENSE-2.0
8
+#
9
+# Unless required by applicable law or agreed to in writing, software
10
+# distributed under the License is distributed on an "AS IS" BASIS,
11
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12
+# implied.
13
+# See the License for the specific language governing permissions and
14
+# limitations under the License.
15
+
16
+import fixtures
17
+import logging
18
+
19
+from nodepool import tests
20
+from nodepool import zk
21
+from nodepool.driver.openshift import provider
22
+
23
+
24
+class FakeOpenshiftClient(object):
25
+    def __init__(self):
26
+        self.projects = []
27
+
28
+        class FakeApi:
29
+            class configuration:
30
+                host = "http://localhost:8080"
31
+                verify_ssl = False
32
+        self.api_client = FakeApi()
33
+
34
+    def list_project(self):
35
+        class FakeProjects:
36
+            items = self.projects
37
+        return FakeProjects
38
+
39
+    def create_project_request(self, proj_body):
40
+        class FakeProject:
41
+            class metadata:
42
+                name = proj_body['metadata']['name']
43
+        self.projects.append(FakeProject)
44
+        return FakeProject
45
+
46
+    def delete_project(self, name):
47
+        to_delete = None
48
+        for project in self.projects:
49
+            if project.metadata.name == name:
50
+                to_delete = project
51
+                break
52
+        if not to_delete:
53
+            raise RuntimeError("Unknown project %s" % name)
54
+        self.projects.remove(to_delete)
55
+
56
+    def create_namespaced_role_binding(self, ns, role_binding_body):
57
+        return
58
+
59
+
60
+class FakeCoreClient(object):
61
+    def create_namespaced_service_account(self, ns, sa_body):
62
+        return
63
+
64
+    def read_namespaced_service_account(self, user, ns):
65
+        class FakeSA:
66
+            class secret:
67
+                name = "fake"
68
+        FakeSA.secrets = [FakeSA.secret]
69
+        return FakeSA
70
+
71
+    def read_namespaced_secret(self, name, ns):
72
+        class FakeSecret:
73
+            class metadata:
74
+                annotations = {'openshift.io/token-secret.value': 'fake-token'}
75
+        return FakeSecret
76
+
77
+    def create_namespaced_pod(self, ns, pod_body):
78
+        return
79
+
80
+    def read_namespaced_pod(self, name, ns):
81
+        class FakePod:
82
+            class status:
83
+                phase = "Running"
84
+        return FakePod
85
+
86
+
87
+class TestDriverOpenshift(tests.DBTestCase):
88
+    log = logging.getLogger("nodepool.TestDriverOpenshift")
89
+
90
+    def setUp(self):
91
+        super().setUp()
92
+        self.fake_os_client = FakeOpenshiftClient()
93
+        self.fake_k8s_client = FakeCoreClient()
94
+
95
+        def fake_get_client(*args):
96
+            return self.fake_os_client, self.fake_k8s_client
97
+
98
+        self.useFixture(fixtures.MockPatchObject(
99
+            provider.OpenshiftProvider, '_get_client',
100
+            fake_get_client
101
+        ))
102
+
103
+    def test_openshift_machine(self):
104
+        configfile = self.setup_config('openshift.yaml')
105
+        pool = self.useNodepool(configfile, watermark_sleep=1)
106
+        pool.start()
107
+        req = zk.NodeRequest()
108
+        req.state = zk.REQUESTED
109
+        req.node_types.append('pod-fedora')
110
+        self.zk.storeNodeRequest(req)
111
+
112
+        self.log.debug("Waiting for request %s", req.id)
113
+        req = self.waitForNodeRequest(req)
114
+        self.assertEqual(req.state, zk.FULFILLED)
115
+
116
+        self.assertNotEqual(req.nodes, [])
117
+        node = self.zk.getNode(req.nodes[0])
118
+        self.assertEqual(node.allocated_to, req.id)
119
+        self.assertEqual(node.state, zk.READY)
120
+        self.assertIsNotNone(node.launcher)
121
+        self.assertEqual(node.connection_type, 'kubectl')
122
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
123
+
124
+        node.state = zk.DELETING
125
+        self.zk.storeNode(node)
126
+
127
+        self.waitForNodeDeletion(node)
128
+
129
+    def test_openshift_native(self):
130
+        configfile = self.setup_config('openshift.yaml')
131
+        pool = self.useNodepool(configfile, watermark_sleep=1)
132
+        pool.start()
133
+        req = zk.NodeRequest()
134
+        req.state = zk.REQUESTED
135
+        req.node_types.append('openshift-project')
136
+        self.zk.storeNodeRequest(req)
137
+
138
+        self.log.debug("Waiting for request %s", req.id)
139
+        req = self.waitForNodeRequest(req)
140
+        self.assertEqual(req.state, zk.FULFILLED)
141
+
142
+        self.assertNotEqual(req.nodes, [])
143
+        node = self.zk.getNode(req.nodes[0])
144
+        self.assertEqual(node.allocated_to, req.id)
145
+        self.assertEqual(node.state, zk.READY)
146
+        self.assertIsNotNone(node.launcher)
147
+        self.assertEqual(node.connection_type, 'project')
148
+        self.assertEqual(node.connection_port.get('token'), 'fake-token')
149
+
150
+        node.state = zk.DELETING
151
+        self.zk.storeNode(node)
152
+
153
+        self.waitForNodeDeletion(node)

playbooks/nodepool-functional-openshift/pre.yaml  +32 -0

@@ -0,0 +1,32 @@
1
+- name: Configure a multi node environment
2
+  hosts: all
3
+  tasks:
4
+    - name: Set up multi-node firewall
5
+      include_role:
6
+        name: multi-node-firewall
7
+
8
+    - name: Set up multi-node hosts file
9
+      include_role:
10
+        name: multi-node-hosts-file
11
+
12
+- hosts: launcher
13
+  roles:
14
+    - role: bindep
15
+  tasks:
16
+    - name: Ensure nodepool services directories
17
+      file:
18
+        path: '{{ ansible_user_dir }}/{{ item }}'
19
+        state: directory
20
+      with_items:
21
+        - work/logs/nodepool
22
+        - work/etc
23
+        - work/images
24
+
25
+    - name: Ensure oc client is installed
26
+      package:
27
+        name: origin-clients
28
+      become: yes
29
+
30
+- hosts: cluster
31
+  roles:
32
+    - install-openshift

playbooks/nodepool-functional-openshift/run.yaml  +26 -0

@@ -0,0 +1,26 @@
1
+- hosts: cluster
2
+  roles:
3
+    - deploy-openshift
4
+
5
+- hosts: launcher
6
+  pre_tasks:
7
+    - name: Login to the openshift cluster as developer
8
+      command: >
9
+          oc login -u developer -p developer --insecure-skip-tls-verify=true
10
+          https://{{ hostvars['cluster']['ansible_hostname'] }}:8443
11
+
12
+    # Zookeeper service doesn't start by default on fedora
13
+    - name: Setup zoo.cfg
14
+      command: cp /etc/zookeeper/zoo_sample.cfg /etc/zookeeper/zoo.cfg
15
+      become: yes
16
+      ignore_errors: yes
17
+
18
+    - name: Start zookeeper
19
+      service:
20
+        name: zookeeper
21
+        state: started
22
+      become: yes
23
+      ignore_errors: yes
24
+  roles:
25
+    - role: tox
26
+      tox_envlist: functional_openshift

releasenotes/notes/openshift-driver-fdef4199b7b73fca.yaml  +5 -0

@@ -0,0 +1,5 @@
1
+---
2
+features:
3
+  - |
4
+    A new driver is available to support an Openshift cluster as a resource
5
+    provider, enabling project and pod requests.

tox.ini  +5 -0

@@ -55,6 +55,11 @@ commands = {posargs}
55 55
 commands = stestr --test-path ./nodepool/tests/functional/kubernetes run --no-subunit-trace {posargs}
56 56
            stestr slowest
57 57
 
58
+[testenv:functional_openshift]
59
+basepython = python3
60
+commands = stestr --test-path ./nodepool/tests/functional/openshift run --no-subunit-trace {posargs}
61
+           stestr slowest
62
+
58 63
 [flake8]
59 64
 # These are ignored intentionally in openstack-infra projects;
60 65
 # please don't submit patches that solely correct them or enable them.
