
Refactor netchecker and calico tests

 * moved the netchecker setup/start/stop methods
   to the helpers (see the usage sketch below);
 * added the possibility to install netchecker into a k8s
   cluster without fetching additional repositories;
 * resolved the TODO (work with node labels via the API).

Change-Id: Ic6a8470ff53d7e95c36240d25d816db3c5a0d89d
Artem Panchenko, 2 years ago
parent commit a1ac1910f5
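
A minimal usage sketch of the new helper module, assembled only from calls that appear in the test diffs below; `k8scluster` and `config` are assumed to be the pytest fixtures already used by the system tests, so this is illustrative rather than a standalone script:

    from fuel_ccp_tests.helpers import netchecker

    # Deploy the netchecker server pod and service from the built-in specs
    # and wait until its REST endpoint starts answering.
    netchecker.start_server(k8s=k8scluster)
    netchecker.wait_running(config.k8s.kube_host, timeout=240)

    # Label the nodes via the k8s API and roll out the agent DaemonSets.
    netchecker.start_agent(k8s=k8scluster)

    # Assert the end-to-end connectivity reported by the server.
    netchecker.wait_check_network(config.k8s.kube_host, works=True)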

fuel_ccp_tests/helpers/netchecker.py (+286, -0)

@@ -0,0 +1,286 @@
+#    Copyright 2016 Mirantis, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import requests
+
+from devops.helpers import helpers
+from k8sclient.client import rest
+
+from fuel_ccp_tests import logger
+from fuel_ccp_tests import settings
+from fuel_ccp_tests.helpers import utils
+
+
+LOG = logger.logger
+
+
+NETCHECKER_CONTAINER_PORT = NETCHECKER_SERVICE_PORT = 8081
+NETCHECKER_NODE_PORT = 31081
+NETCHECKER_REPORT_INTERVAL = 30
+
+NETCHECKER_POD_CFG = {
+    "apiVersion": "v1",
+    "kind": "Pod",
+    "metadata": {
+        "labels": {
+            "app": "netchecker-server"
+        },
+        "name": "netchecker-server"
+    },
+    "spec": {
+        "containers": [
+            {
+                "env": None,
+                "image": "127.0.0.1:31500/netchecker/server:latest",
+                "imagePullPolicy": "Always",
+                "name": "netchecker-server",
+                "ports": [
+                    {
+                        "containerPort": NETCHECKER_CONTAINER_PORT,
+                        "hostPort": NETCHECKER_CONTAINER_PORT
+                    }
+                ]
+            },
+            {
+                "args": [
+                    "proxy"
+                ],
+                "image": ("gcr.io/google_containers/kubectl:"
+                          "v0.18.0-120-gaeb4ac55ad12b1-dirty"),
+                "imagePullPolicy": "Always",
+                "name": "kubectl-proxy"
+            }
+        ]
+    }
+}
+
+NETCHECKER_SVC_CFG = {
+    "apiVersion": "v1",
+    "kind": "Service",
+    "metadata": {
+        "name": "netchecker-service"
+    },
+    "spec": {
+        "ports": [
+            {
+                "nodePort": NETCHECKER_NODE_PORT,
+                "port": NETCHECKER_SERVICE_PORT,
+                "protocol": "TCP",
+                "targetPort": NETCHECKER_CONTAINER_PORT
+            }
+        ],
+        "selector": {
+            "app": "netchecker-server"
+        },
+        "type": "NodePort"
+    }
+}
+
+NETCHECKER_DS_CFG = [
+    {
+        "apiVersion": "extensions/v1beta1",
+        "kind": "DaemonSet",
+        "metadata": {
+            "labels": {
+                "app": "netchecker-agent-hostnet"
+            },
+            "name": "netchecker-agent"
+        },
+        "spec": {
+            "template": {
+                "metadata": {
+                    "labels": {
+                        "app": "netchecker-agent"
+                    },
+                    "name": "netchecker-agent"
+                },
+                "spec": {
+                    "containers": [
+                        {
+                            "env": [
+                                {
+                                    "name": "MY_POD_NAME",
+                                    "valueFrom": {
+                                        "fieldRef": {
+                                            "fieldPath": "metadata.name"
+                                        }
+                                    }
+                                },
+                                {
+                                    "name": "REPORT_INTERVAL",
+                                    "value": str(NETCHECKER_REPORT_INTERVAL)
+                                }
+                            ],
+                            "image": "127.0.0.1:31500/netchecker/agent:latest",
+                            "imagePullPolicy": "Always",
+                            "name": "netchecker-agent"
+                        }
+                    ],
+                    "nodeSelector": {
+                        "netchecker": "agent"
+                    }
+                }
+            }
+        }
+    },
+    {
+        "apiVersion": "extensions/v1beta1",
+        "kind": "DaemonSet",
+        "metadata": {
+            "labels": {
+                "app": "netchecker-agent-hostnet"
+            },
+            "name": "netchecker-agent-hostnet"
+        },
+        "spec": {
+            "template": {
+                "metadata": {
+                    "labels": {
+                        "app": "netchecker-agent-hostnet"
+                    },
+                    "name": "netchecker-agent-hostnet"
+                },
+                "spec": {
+                    "containers": [
+                        {
+                            "env": [
+                                {
+                                    "name": "MY_POD_NAME",
+                                    "valueFrom": {
+                                        "fieldRef": {
+                                            "fieldPath": "metadata.name"
+                                        }
+                                    }
+                                },
+                                {
+                                    "name": "REPORT_INTERVAL",
+                                    "value": str(NETCHECKER_REPORT_INTERVAL)
+                                }
+                            ],
+                            "image": "127.0.0.1:31500/netchecker/agent:latest",
+                            "imagePullPolicy": "Always",
+                            "name": "netchecker-agent"
+                        }
+                    ],
+                    "hostNetwork": True,
+                    "nodeSelector": {
+                        "netchecker": "agent"
+                    }
+                }
+            }
+        }
+    }
+]
+
+
+def start_server(k8s, namespace=None,
+                 pod_spec=NETCHECKER_POD_CFG,
+                 svc_spec=NETCHECKER_SVC_CFG):
+    """Start netchecker server in k8s cluster
+
+    :param k8s: K8SManager
+    :param namespace: str
+    :param pod_spec: dict
+    :param svc_spec: dict
+    :return: None
+    """
+    for container in pod_spec['spec']['containers']:
+        if container['name'] == 'netchecker-server':
+            container['image'] = '{0}:{1}'.format(
+                settings.MCP_NETCHECKER_SERVER_IMAGE_REPO,
+                settings.MCP_NETCHECKER_SERVER_VERSION)
+    try:
+        if k8s.api.pods.get(name=pod_spec['metadata']['name']):
+            LOG.debug('Network checker server pod {} is '
+                      'already running! Skipping resource creation'
+                      '.'.format(pod_spec['metadata']['name']))
+    except rest.ApiException as e:
+        if e.status == 404:
+            k8s.check_pod_create(body=pod_spec, namespace=namespace)
+        else:
+            raise e
+
+    try:
+        if k8s.api.services.get(name=svc_spec['metadata']['name']):
+            LOG.debug('Network checker server service {} is '
+                      'already running! Skipping resource creation'
+                      '.'.format(svc_spec['metadata']['name']))
+    except rest.ApiException as e:
+        if e.status == 404:
+            k8s.check_service_create(body=svc_spec, namespace=namespace)
+        else:
+            raise e
+
+
+def start_agent(k8s, namespace=None, ds_spec=NETCHECKER_DS_CFG):
+    """Start netchecker agent in k8s cluster
+
+    :param k8s:
+    :param namespace:
+    :param ds_spec:
+    :return:
+    """
+    for k8s_node in k8s.api.nodes.list():
+        k8s_node.add_labels({'netchecker': 'agent'})
+
+    for ds in ds_spec:
+        for container in (ds['spec']['template']['spec']['containers']):
+            if container['name'] == 'netchecker-agent':
+                container['image'] = '{0}:{1}'.format(
+                    settings.MCP_NETCHECKER_AGENT_IMAGE_REPO,
+                    settings.MCP_NETCHECKER_AGENT_VERSION)
+        k8s.check_ds_create(body=ds, namespace=namespace)
+        k8s.wait_ds_ready(dsname=ds['metadata']['name'], namespace=namespace)
+
+
+@utils.retry(3, requests.exceptions.RequestException)
+def get_status(kube_host_ip, netchecker_pod_port=NETCHECKER_NODE_PORT):
+    net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
+        kube_host_ip, netchecker_pod_port)
+    return requests.get(net_status_url, timeout=5)
+
+
+def wait_running(kube_host_ip, timeout=120, interval=5):
+    helpers.wait_pass(
+        lambda: get_status(kube_host_ip),
+        timeout=timeout, interval=interval)
+
+
+def check_network(kube_host_ip, works=True):
+    if works:
+        assert get_status(kube_host_ip).status_code in (200, 204)
+    else:
+        assert get_status(kube_host_ip).status_code == 400
+
+
+def wait_check_network(kube_host_ip, works=True, timeout=120, interval=5):
+    helpers.wait_pass(lambda: check_network(kube_host_ip, works=works),
+                      timeout=timeout, interval=interval)
+
+
+def calico_block_traffic_on_node(underlay, target_node):
+    LOG.info('Blocked traffic to the network checker service from '
+             'containers on node "{}".'.format(target_node))
+    underlay.sudo_check_call(
+        'calicoctl profile calico-k8s-network rule add --at=1 outbound '
+        'deny tcp to ports {0}'.format(NETCHECKER_SERVICE_PORT),
+        node_name=target_node)


+def calico_unblock_traffic_on_node(underlay, target_node):
+    LOG.info('Unblocked traffic to the network checker service from '
+             'containers on node "{}".'.format(target_node))
+    underlay.sudo_check_call(
+        'calicoctl profile calico-k8s-network rule remove outbound --at=1',
+        node_name=target_node)

fuel_ccp_tests/managers/k8smanager.py (+10, -10)

@@ -214,11 +214,11 @@ class K8SManager(object):
                                  '"{phase}" phase'.format(
                                      pod_name=pod_name, phase=phase))
 
-    def check_pod_create(self, body, timeout=300, interval=5):
+    def check_pod_create(self, body, namespace=None, timeout=300, interval=5):
         """Check creating sample pod
 
         :param k8s_pod: V1Pod
-        :param k8sclient: K8sCluster
+        :param namespace: str
         :rtype: V1Pod
         """
         LOG.info("Creating pod in k8s cluster")
@@ -228,10 +228,10 @@ class K8SManager(object):
         )
         LOG.debug("Timeout for creation is set to {}".format(timeout))
         LOG.debug("Checking interval is set to {}".format(interval))
-        pod = self.api.pods.create(body=body)
+        pod = self.api.pods.create(body=body, namespace=namespace)
         pod.wait_running(timeout=300, interval=5)
         LOG.info("Pod '{}' is created".format(pod.metadata.name))
-        return self.api.pods.get(name=pod.metadata.name)
+        return self.api.pods.get(name=pod.metadata.name, namespace=namespace)
 
     def wait_pod_deleted(self, podname, timeout=60, interval=5):
         helpers.wait(
@@ -255,11 +255,11 @@ class K8SManager(object):
         self.wait_pod_deleted(k8s_pod.name, timeout, interval)
         LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
 
-    def check_service_create(self, body):
+    def check_service_create(self, body, namespace=None):
         """Check creating k8s service
 
         :param body: dict, service spec
-        :param k8sclient: K8sCluster object
+        :param namespace: str
         :rtype: K8sService object
         """
         LOG.info("Creating service in k8s cluster")
@@ -267,15 +267,15 @@ class K8SManager(object):
             "Service spec to create:\n{}".format(
                 yaml.dump(body, default_flow_style=False))
         )
-        service = self.api.services.create(body=body)
+        service = self.api.services.create(body=body, namespace=namespace)
         LOG.info("Service '{}' is created".format(service.metadata.name))
         return self.api.services.get(name=service.metadata.name)
 
-    def check_ds_create(self, body):
+    def check_ds_create(self, body, namespace=None):
         """Check creating k8s DaemonSet
 
         :param body: dict, DaemonSet spec
-        :param k8sclient: K8sCluster object
+        :param namespace: str
         :rtype: K8sDaemonSet object
         """
         LOG.info("Creating DaemonSet in k8s cluster")
@@ -283,7 +283,7 @@ class K8SManager(object):
             "DaemonSet spec to create:\n{}".format(
                 yaml.dump(body, default_flow_style=False))
         )
-        ds = self.api.daemonsets.create(body=body)
+        ds = self.api.daemonsets.create(body=body, namespace=namespace)
         LOG.info("DaemonSet '{}' is created".format(ds.metadata.name))
         return self.api.daemonsets.get(name=ds.metadata.name)
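
The manager methods above now take an optional `namespace` argument; a hedged example of passing it explicitly (`k8s` stands for a K8SManager instance, the specs come from the new helper module, and the 'default' namespace is only an example value, since namespace=None keeps the previous behaviour):

    from fuel_ccp_tests.helpers import netchecker

    pod = k8s.check_pod_create(body=netchecker.NETCHECKER_POD_CFG,
                               namespace='default')
    svc = k8s.check_service_create(body=netchecker.NETCHECKER_SVC_CFG,
                                   namespace='default')
    ds = k8s.check_ds_create(body=netchecker.NETCHECKER_DS_CFG[0],
                             namespace='default')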
 

fuel_ccp_tests/settings.py (+6, -4)

@@ -245,13 +245,15 @@ NETCHECKER_AGENT_DIR = os.environ.get(
     'NETCHECKER_AGENT_DIR', os.path.join(os.getcwd(), 'mcp-netchecker-agent')
 )
 MCP_NETCHECKER_AGENT_IMAGE_REPO = os.environ.get(
-    'MCP_NETCHECKER_AGENT_IMAGE_REPO')
+    'MCP_NETCHECKER_AGENT_IMAGE_REPO',
+    'quay.io/l23network/mcp-netchecker-agent')
 MCP_NETCHECKER_AGENT_VERSION = os.environ.get(
-    'MCP_NETCHECKER_AGENT_VERSION')
+    'MCP_NETCHECKER_AGENT_VERSION', 'latest')
 MCP_NETCHECKER_SERVER_IMAGE_REPO = os.environ.get(
-    'MCP_NETCHECKER_SERVER_IMAGE_REPO')
+    'MCP_NETCHECKER_SERVER_IMAGE_REPO',
+    'quay.io/l23network/mcp-netchecker-server')
 MCP_NETCHECKER_SERVER_VERSION = os.environ.get(
-    'MCP_NETCHECKER_SERVER_VERSION')
+    'MCP_NETCHECKER_SERVER_VERSION', 'latest')
 
 # Settings for AppController testing
 # AC_PATH - path to k8s-AppController repo
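
With these defaults the netchecker images are pulled from quay.io unless the corresponding environment variables are set; a sketch of overriding them (the registry values below are examples only, borrowed from the local-registry images in the pod specs above, and must be exported before fuel_ccp_tests.settings is imported):

    import os

    os.environ['MCP_NETCHECKER_SERVER_IMAGE_REPO'] = '127.0.0.1:31500/netchecker/server'
    os.environ['MCP_NETCHECKER_SERVER_VERSION'] = 'latest'
    os.environ['MCP_NETCHECKER_AGENT_IMAGE_REPO'] = '127.0.0.1:31500/netchecker/agent'
    os.environ['MCP_NETCHECKER_AGENT_VERSION'] = 'latest'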

fuel_ccp_tests/tests/system/test_calico.py (+6, -10)

@@ -15,18 +15,14 @@
 import pytest
 
 import base_test
-import test_netchecker
-
 from fuel_ccp_tests import logger
+from fuel_ccp_tests.helpers import netchecker
 
 LOG = logger.logger
 
 
-@pytest.mark.usefixtures("check_netchecker_files")
-@pytest.mark.usefixtures("check_netchecker_images_settings")
 @pytest.mark.usefixtures("check_calico_images_settings")
-class TestFuelCCPCalico(base_test.SystemBaseTest,
-                        test_netchecker.TestFuelCCPNetCheckerMixin):
+class TestFuelCCPCalico(base_test.SystemBaseTest):
     """Test class for Calico network plugin in k8s"""
 
     @pytest.mark.fail_snapshot
@@ -52,13 +48,13 @@ class TestFuelCCPCalico(base_test.SystemBaseTest,
 
         # STEP #2
         show_step(2)
-        self.start_netchecker_server(k8s=k8scluster)
-        self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
+        netchecker.start_server(k8s=k8scluster)
+        netchecker.wait_running(config.k8s.kube_host, timeout=240)
 
         # STEP #3
         show_step(3)
-        self.start_netchecker_agent(underlay, k8scluster)
+        netchecker.start_agent(k8s=k8scluster)
 
         # STEP #4
         show_step(4)
-        self.wait_check_network(config.k8s.kube_host, works=True)
+        netchecker.wait_check_network(config.k8s.kube_host, works=True)

fuel_ccp_tests/tests/system/test_netchecker.py (+27, -107)

@@ -14,15 +14,11 @@
 
 import os
 import pytest
-import requests
 import yaml
 
-from devops.helpers import helpers
-from k8sclient.client import rest
-
 import base_test
 from fuel_ccp_tests.helpers import ext
-from fuel_ccp_tests.helpers import utils
+from fuel_ccp_tests.helpers import netchecker
 from fuel_ccp_tests import logger
 from fuel_ccp_tests import settings
 
@@ -40,102 +36,26 @@ class TestFuelCCPNetCheckerMixin:
         settings.NETCHECKER_AGENT_DIR, 'netchecker-agent.yaml')
     netchecker_files = (pod_yaml_file, svc_yaml_file, ds_yaml_file)
 
-    def start_netchecker_server(self, k8s):
+    @property
+    def pod_spec(self):
+        if not os.path.isfile(self.pod_yaml_file):
+            return None
         with open(self.pod_yaml_file) as pod_conf:
-            for pod_spec in yaml.load_all(pod_conf):
-                for container in pod_spec['spec']['containers']:
-                    if container['name'] == 'netchecker-server':
-                        container['image'] = '{0}:{1}'.format(
-                            settings.MCP_NETCHECKER_SERVER_IMAGE_REPO,
-                            settings.MCP_NETCHECKER_SERVER_VERSION)
-                try:
-                    if k8s.api.pods.get(name=pod_spec['metadata']['name']):
-                        LOG.debug('Network checker server pod {} is '
-                                  'already running! Skipping resource creation'
-                                  '.'.format(pod_spec['metadata']['name']))
-                        continue
-                except rest.ApiException as e:
-                    if e.status == 404:
-                        k8s.check_pod_create(body=pod_spec)
-                    else:
-                        raise e
+            return yaml.load(pod_conf)
 
+    @property
+    def svc_spec(self):
+        if not os.path.isfile(self.svc_yaml_file):
+            return None
         with open(self.svc_yaml_file) as svc_conf:
-            for svc_spec in yaml.load_all(svc_conf):
-                try:
-                    if k8s.api.services.get(
-                            name=svc_spec['metadata']['name']):
-                        LOG.debug('Network checker server service {} is '
-                                  'already running! Skipping resource creation'
-                                  '.'.format(svc_spec['metadata']['name']))
-                        continue
-                except rest.ApiException as e:
-                    if e.status == 404:
-                        k8s.check_service_create(body=svc_spec)
-                    else:
-                        raise e
-
-    def start_netchecker_agent(self, underlay, k8s):
-        # TODO(apanchenko): use python API client here when it will have
-        # TODO(apanchenko): needed functionality (able work with labels)
-        underlay.sudo_check_call(
-            "kubectl get nodes | awk '/Ready/{print $1}' | "
-            "xargs -I {} kubectl label nodes {} netchecker=agent --overwrite",
-            node_name='master')
+            return yaml.load(svc_conf)
 
+    @property
+    def ds_spec(self):
+        if not os.path.isfile(self.ds_yaml_file):
+            return None
         with open(self.ds_yaml_file) as ds_conf:
-            for daemon_set_spec in yaml.load_all(ds_conf):
-                for container in (daemon_set_spec['spec']['template']['spec']
-                                  ['containers']):
-                    if container['name'] == 'netchecker-agent':
-                        container['image'] = '{0}:{1}'.format(
-                            settings.MCP_NETCHECKER_AGENT_IMAGE_REPO,
-                            settings.MCP_NETCHECKER_AGENT_VERSION)
-                k8s.check_ds_create(body=daemon_set_spec)
-                k8s.wait_ds_ready(dsname=daemon_set_spec['metadata']['name'])
-
-    @staticmethod
-    @utils.retry(3, requests.exceptions.RequestException)
-    def get_netchecker_status(kube_host_ip, netchecker_pod_port=31081):
-        net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
-            kube_host_ip, netchecker_pod_port)
-        return requests.get(net_status_url, timeout=5)
-
-    @staticmethod
-    def wait_netchecker_running(kube_host_ip, timeout=120, interval=5):
-        helpers.wait_pass(
-            lambda: TestFuelCCPNetChecker.get_netchecker_status(kube_host_ip),
-            timeout=timeout, interval=interval)
-
-    def check_network(self, kube_host_ip, works=True):
-        if works:
-            assert self.get_netchecker_status(kube_host_ip).status_code in \
-                (200, 204)
-        else:
-            assert self.get_netchecker_status(kube_host_ip).status_code == 400
-
-    def wait_check_network(self, kube_host_ip, works=True, timeout=120,
-                           interval=5):
-        helpers.wait_pass(
-            lambda: self.check_network(kube_host_ip, works=works),
-            timeout=timeout, interval=interval)
-
-    @staticmethod
-    def calico_block_traffic_on_node(underlay, target_node):
-        LOG.info('Blocked traffic to the network checker service from '
-                 'containers on node "{}".'.format(target_node))
-        underlay.sudo_check_call(
-            'calicoctl profile calico-k8s-network rule add '
-            '--at=1 outbound deny tcp to ports 8081',
-            node_name=target_node)
-
-    @staticmethod
-    def calico_unblock_traffic_on_node(underlay, target_node):
-        LOG.info('Unblocked traffic to the network checker service from '
-                 'containers on node "{}".'.format(target_node))
-        underlay.sudo_check_call(
-            'calicoctl profile calico-k8s-network rule remove outbound --at=1',
-            node_name=target_node)
+            return [i for i in yaml.load_all(ds_conf)]
 
 
 @pytest.mark.usefixtures("check_netchecker_files")
@@ -147,8 +67,7 @@ class TestFuelCCPNetChecker(base_test.SystemBaseTest,
     @pytest.mark.fail_snapshot
     @pytest.mark.snapshot_needed
     @pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)
-    def test_k8s_netchecker(self, underlay, k8scluster, config,
-                            show_step):
+    def test_k8s_netchecker(self, underlay, k8scluster, config, show_step):
         """Test for deploying an k8s environment with Calico and check
            connectivity between its networks
 
@@ -174,34 +93,35 @@ class TestFuelCCPNetChecker(base_test.SystemBaseTest,
 
         # STEP #2
         show_step(2)
-        self.start_netchecker_server(k8s=k8scluster)
-        self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
+        netchecker.start_server(k8s=k8scluster, pod_spec=self.pod_spec,
+                                svc_spec=self.svc_spec)
+        netchecker.wait_running(config.k8s.kube_host, timeout=240)
 
         # STEP #3
        show_step(3)
-        self.wait_check_network(config.k8s.kube_host, works=False)
+        netchecker.wait_check_network(config.k8s.kube_host, works=False)
 
         # STEP #4
         show_step(4)
-        self.start_netchecker_agent(underlay, k8scluster)
+        netchecker.start_agent(k8s=k8scluster, ds_spec=self.ds_spec)
 
         # STEP #5
         show_step(5)
-        self.wait_check_network(config.k8s.kube_host, works=True)
+        netchecker.wait_check_network(config.k8s.kube_host, works=True)
 
         # STEP #6
         show_step(6)
         target_node = underlay.get_random_node()
-        self.calico_block_traffic_on_node(underlay, target_node)
+        netchecker.calico_block_traffic_on_node(underlay, target_node)
 
         # STEP #7
         show_step(7)
-        self.wait_check_network(config.k8s.kube_host, works=False)
+        netchecker.wait_check_network(config.k8s.kube_host, works=False)
 
         # STEP #8
         show_step(8)
-        self.calico_unblock_traffic_on_node(underlay, target_node)
+        netchecker.calico_unblock_traffic_on_node(underlay, target_node)
 
         # STEP #9
         show_step(9)
-        self.wait_check_network(config.k8s.kube_host, works=True)
+        netchecker.wait_check_network(config.k8s.kube_host, works=True)
