Refactor netchecker and calico tests
* moved methods for netchecker setup/start/stop to the helpers; * added a possibility to install netchecker to a k8s cluster w/o fetching additional repositories; * resolved #TODO (work with node labels via API). Change-Id: Ic6a8470ff53d7e95c36240d25d816db3c5a0d89d
This commit is contained in:
parent
6466a5d08b
commit
a1ac1910f5
|
@ -0,0 +1,286 @@
|
||||||
|
# Copyright 2016 Mirantis, Inc.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
|
# not use this file except in compliance with the License. You may obtain
|
||||||
|
# a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||||
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
|
# License for the specific language governing permissions and limitations
|
||||||
|
# under the License.
|
||||||
|
|
||||||
|
import requests
|
||||||
|
|
||||||
|
from devops.helpers import helpers
|
||||||
|
from k8sclient.client import rest
|
||||||
|
|
||||||
|
from fuel_ccp_tests import logger
|
||||||
|
from fuel_ccp_tests import settings
|
||||||
|
from fuel_ccp_tests.helpers import utils
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logger.logger
|
||||||
|
|
||||||
|
|
||||||
|
# Port the netchecker server container listens on; the k8s service
# forwards its own port to the same number.
NETCHECKER_CONTAINER_PORT = NETCHECKER_SERVICE_PORT = 8081
# NodePort that exposes the netchecker service outside the cluster.
NETCHECKER_NODE_PORT = 31081
# Interval (seconds) at which agents report back to the server.
NETCHECKER_REPORT_INTERVAL = 30
|
||||||
|
|
||||||
|
# Pod spec for the netchecker server: the server container itself plus a
# kubectl-proxy sidecar that gives it access to the k8s API.
NETCHECKER_POD_CFG = {
    "apiVersion": "v1",
    "kind": "Pod",
    "metadata": {
        "name": "netchecker-server",
        "labels": {"app": "netchecker-server"},
    },
    "spec": {
        "containers": [
            {
                "name": "netchecker-server",
                "image": "127.0.0.1:31500/netchecker/server:latest",
                "imagePullPolicy": "Always",
                "env": None,
                "ports": [
                    {
                        "containerPort": NETCHECKER_CONTAINER_PORT,
                        "hostPort": NETCHECKER_CONTAINER_PORT,
                    },
                ],
            },
            {
                "name": "kubectl-proxy",
                "image": ("gcr.io/google_containers/kubectl:"
                          "v0.18.0-120-gaeb4ac55ad12b1-dirty"),
                "imagePullPolicy": "Always",
                "args": ["proxy"],
            },
        ],
    },
}
|
||||||
|
|
||||||
|
# NodePort service in front of the netchecker server pod (selected by
# the "app: netchecker-server" label).
NETCHECKER_SVC_CFG = {
    "apiVersion": "v1",
    "kind": "Service",
    "metadata": {"name": "netchecker-service"},
    "spec": {
        "type": "NodePort",
        "selector": {"app": "netchecker-server"},
        "ports": [
            {
                "protocol": "TCP",
                "port": NETCHECKER_SERVICE_PORT,
                "targetPort": NETCHECKER_CONTAINER_PORT,
                "nodePort": NETCHECKER_NODE_PORT,
            },
        ],
    },
}
|
||||||
|
|
||||||
|
# DaemonSet specs for netchecker agents: one agent on the pod network
# and one on the host network, so connectivity is checked for both.
# Agents run only on nodes labelled "netchecker=agent" (see start_agent).
NETCHECKER_DS_CFG = [
    {
        "apiVersion": "extensions/v1beta1",
        "kind": "DaemonSet",
        "metadata": {
            "labels": {
                # Fixed copy-paste slip: this DaemonSet is the plain
                # (pod network) agent, not the hostnet one.
                "app": "netchecker-agent"
            },
            "name": "netchecker-agent"
        },
        "spec": {
            "template": {
                "metadata": {
                    "labels": {
                        "app": "netchecker-agent"
                    },
                    "name": "netchecker-agent"
                },
                "spec": {
                    "containers": [
                        {
                            "env": [
                                {
                                    "name": "MY_POD_NAME",
                                    "valueFrom": {
                                        "fieldRef": {
                                            "fieldPath": "metadata.name"
                                        }
                                    }
                                },
                                {
                                    "name": "REPORT_INTERVAL",
                                    "value": str(NETCHECKER_REPORT_INTERVAL)
                                }
                            ],
                            "image": "127.0.0.1:31500/netchecker/agent:latest",
                            "imagePullPolicy": "Always",
                            "name": "netchecker-agent"
                        }
                    ],
                    "nodeSelector": {
                        "netchecker": "agent"
                    }
                }
            }
        }
    },
    {
        "apiVersion": "extensions/v1beta1",
        "kind": "DaemonSet",
        "metadata": {
            "labels": {
                "app": "netchecker-agent-hostnet"
            },
            "name": "netchecker-agent-hostnet"
        },
        "spec": {
            "template": {
                "metadata": {
                    "labels": {
                        "app": "netchecker-agent-hostnet"
                    },
                    "name": "netchecker-agent-hostnet"
                },
                "spec": {
                    "containers": [
                        {
                            "env": [
                                {
                                    "name": "MY_POD_NAME",
                                    "valueFrom": {
                                        "fieldRef": {
                                            "fieldPath": "metadata.name"
                                        }
                                    }
                                },
                                {
                                    "name": "REPORT_INTERVAL",
                                    "value": str(NETCHECKER_REPORT_INTERVAL)
                                }
                            ],
                            "image": "127.0.0.1:31500/netchecker/agent:latest",
                            "imagePullPolicy": "Always",
                            "name": "netchecker-agent"
                        }
                    ],
                    # hostNetwork gives this agent the node's network stack
                    "hostNetwork": True,
                    "nodeSelector": {
                        "netchecker": "agent"
                    }
                }
            }
        }
    }
]
|
||||||
|
|
||||||
|
|
||||||
|
def start_server(k8s, namespace=None,
                 pod_spec=NETCHECKER_POD_CFG,
                 svc_spec=NETCHECKER_SVC_CFG):
    """Start netchecker server in k8s cluster.

    Patches the server container image from settings, then creates the
    server pod and its service unless resources with the same names
    already exist (creation is skipped in that case).

    :param k8s: K8SManager
    :param namespace: str, namespace to create the resources in
    :param pod_spec: dict, pod specification for the netchecker server
    :param svc_spec: dict, service specification for the netchecker server
    :return: None
    """
    for container in pod_spec['spec']['containers']:
        if container['name'] == 'netchecker-server':
            # Image repo/tag come from the environment (settings) so
            # tests can run against custom netchecker builds.
            container['image'] = '{0}:{1}'.format(
                settings.MCP_NETCHECKER_SERVER_IMAGE_REPO,
                settings.MCP_NETCHECKER_SERVER_VERSION)
    try:
        if k8s.api.pods.get(name=pod_spec['metadata']['name']):
            LOG.debug('Network checker server pod {} is '
                      'already running! Skipping resource creation'
                      '.'.format(pod_spec['metadata']['name']))
    except rest.ApiException as e:
        if e.status == 404:
            # Pod not found - create it
            k8s.check_pod_create(body=pod_spec, namespace=namespace)
        else:
            # Bare raise preserves the original traceback
            # (``raise e`` would reset it)
            raise

    try:
        if k8s.api.services.get(name=svc_spec['metadata']['name']):
            LOG.debug('Network checker server service {} is '
                      'already running! Skipping resource creation'
                      '.'.format(svc_spec['metadata']['name']))
    except rest.ApiException as e:
        if e.status == 404:
            # Service not found - create it
            k8s.check_service_create(body=svc_spec, namespace=namespace)
        else:
            raise
|
||||||
|
|
||||||
|
|
||||||
|
def start_agent(k8s, namespace=None, ds_spec=NETCHECKER_DS_CFG):
    """Start netchecker agent DaemonSets in k8s cluster.

    Labels every k8s node with ``netchecker=agent`` (matching the
    DaemonSets' nodeSelector), patches the agent container image from
    settings, then creates each DaemonSet and waits until it is ready.

    :param k8s: K8SManager
    :param namespace: str, namespace to create the DaemonSets in
    :param ds_spec: list of dict, DaemonSet specifications
    :return: None
    """
    # Label the nodes via the API so the DaemonSets' nodeSelector
    # schedules agent pods onto them.
    for k8s_node in k8s.api.nodes.list():
        k8s_node.add_labels({'netchecker': 'agent'})

    for ds in ds_spec:
        for container in ds['spec']['template']['spec']['containers']:
            if container['name'] == 'netchecker-agent':
                container['image'] = '{0}:{1}'.format(
                    settings.MCP_NETCHECKER_AGENT_IMAGE_REPO,
                    settings.MCP_NETCHECKER_AGENT_VERSION)
        k8s.check_ds_create(body=ds, namespace=namespace)
        k8s.wait_ds_ready(dsname=ds['metadata']['name'], namespace=namespace)
|
||||||
|
|
||||||
|
|
||||||
|
@utils.retry(3, requests.exceptions.RequestException)
def get_status(kube_host_ip, netchecker_pod_port=NETCHECKER_NODE_PORT):
    """Query the netchecker server connectivity-check endpoint.

    Retried up to 3 times on request errors.

    :param kube_host_ip: str, IP of a k8s node exposing the NodePort
    :param netchecker_pod_port: int, NodePort of the netchecker service
    :rtype: requests.Response
    """
    url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
        kube_host_ip, netchecker_pod_port)
    return requests.get(url, timeout=5)
|
||||||
|
|
||||||
|
|
||||||
|
def wait_running(kube_host_ip, timeout=120, interval=5):
    """Wait until the netchecker server starts answering requests.

    :param kube_host_ip: str, IP of a k8s node exposing the NodePort
    :param timeout: int, seconds to wait before giving up
    :param interval: int, seconds between attempts
    """
    def _ping():
        return get_status(kube_host_ip)

    helpers.wait_pass(_ping, timeout=timeout, interval=interval)
|
||||||
|
|
||||||
|
|
||||||
|
def check_network(kube_host_ip, works=True):
    """Assert the connectivity state reported by the netchecker server.

    :param kube_host_ip: str, IP of a k8s node exposing the NodePort
    :param works: bool, True - expect a healthy report (HTTP 200/204),
        False - expect missing agents reported (HTTP 400)
    """
    status_code = get_status(kube_host_ip).status_code
    if works:
        assert status_code in (200, 204)
    else:
        assert status_code == 400
|
||||||
|
|
||||||
|
|
||||||
|
def wait_check_network(kube_host_ip, works=True, timeout=120, interval=5):
    """Wait until check_network() reports the expected state.

    :param kube_host_ip: str, IP of a k8s node exposing the NodePort
    :param works: bool, expected connectivity state (see check_network)
    :param timeout: int, seconds to wait before giving up
    :param interval: int, seconds between attempts
    """
    def _check():
        return check_network(kube_host_ip, works=works)

    helpers.wait_pass(_check, timeout=timeout, interval=interval)
|
||||||
|
|
||||||
|
|
||||||
|
def calico_block_traffic_on_node(underlay, target_node):
    """Add a Calico rule denying traffic to the netchecker service port.

    :param underlay: underlay manager used to run commands on the node
    :param target_node: str, name of the node to block traffic on
    """
    LOG.info('Blocked traffic to the network checker service from '
             'containers on node "{}".'.format(target_node))
    cmd = ('calicoctl profile calico-k8s-network rule add --at=1 outbound '
           'deny tcp to ports {0}'.format(NETCHECKER_SERVICE_PORT))
    underlay.sudo_check_call(cmd, node_name=target_node)
|
||||||
|
|
||||||
|
|
||||||
|
def calico_unblock_traffic_on_node(underlay, target_node):
    """Remove the Calico deny rule added by calico_block_traffic_on_node.

    :param underlay: underlay manager used to run commands on the node
    :param target_node: str, name of the node to unblock traffic on
    """
    LOG.info('Unblocked traffic to the network checker service from '
             'containers on node "{}".'.format(target_node))
    cmd = 'calicoctl profile calico-k8s-network rule remove outbound --at=1'
    underlay.sudo_check_call(cmd, node_name=target_node)
|
|
@ -214,11 +214,11 @@ class K8SManager(object):
|
||||||
'"{phase}" phase'.format(
|
'"{phase}" phase'.format(
|
||||||
pod_name=pod_name, phase=phase))
|
pod_name=pod_name, phase=phase))
|
||||||
|
|
||||||
def check_pod_create(self, body, timeout=300, interval=5):
|
def check_pod_create(self, body, namespace=None, timeout=300, interval=5):
|
||||||
"""Check creating sample pod
|
"""Check creating sample pod
|
||||||
|
|
||||||
:param k8s_pod: V1Pod
|
:param k8s_pod: V1Pod
|
||||||
:param k8sclient: K8sCluster
|
:param namespace: str
|
||||||
:rtype: V1Pod
|
:rtype: V1Pod
|
||||||
"""
|
"""
|
||||||
LOG.info("Creating pod in k8s cluster")
|
LOG.info("Creating pod in k8s cluster")
|
||||||
|
@ -228,10 +228,10 @@ class K8SManager(object):
|
||||||
)
|
)
|
||||||
LOG.debug("Timeout for creation is set to {}".format(timeout))
|
LOG.debug("Timeout for creation is set to {}".format(timeout))
|
||||||
LOG.debug("Checking interval is set to {}".format(interval))
|
LOG.debug("Checking interval is set to {}".format(interval))
|
||||||
pod = self.api.pods.create(body=body)
|
pod = self.api.pods.create(body=body, namespace=namespace)
|
||||||
pod.wait_running(timeout=300, interval=5)
|
pod.wait_running(timeout=300, interval=5)
|
||||||
LOG.info("Pod '{}' is created".format(pod.metadata.name))
|
LOG.info("Pod '{}' is created".format(pod.metadata.name))
|
||||||
return self.api.pods.get(name=pod.metadata.name)
|
return self.api.pods.get(name=pod.metadata.name, namespace=namespace)
|
||||||
|
|
||||||
def wait_pod_deleted(self, podname, timeout=60, interval=5):
|
def wait_pod_deleted(self, podname, timeout=60, interval=5):
|
||||||
helpers.wait(
|
helpers.wait(
|
||||||
|
@ -255,11 +255,11 @@ class K8SManager(object):
|
||||||
self.wait_pod_deleted(k8s_pod.name, timeout, interval)
|
self.wait_pod_deleted(k8s_pod.name, timeout, interval)
|
||||||
LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
|
LOG.debug("Pod '{}' is deleted".format(k8s_pod.name))
|
||||||
|
|
||||||
def check_service_create(self, body):
|
def check_service_create(self, body, namespace=None):
|
||||||
"""Check creating k8s service
|
"""Check creating k8s service
|
||||||
|
|
||||||
:param body: dict, service spec
|
:param body: dict, service spec
|
||||||
:param k8sclient: K8sCluster object
|
:param namespace: str
|
||||||
:rtype: K8sService object
|
:rtype: K8sService object
|
||||||
"""
|
"""
|
||||||
LOG.info("Creating service in k8s cluster")
|
LOG.info("Creating service in k8s cluster")
|
||||||
|
@ -267,15 +267,15 @@ class K8SManager(object):
|
||||||
"Service spec to create:\n{}".format(
|
"Service spec to create:\n{}".format(
|
||||||
yaml.dump(body, default_flow_style=False))
|
yaml.dump(body, default_flow_style=False))
|
||||||
)
|
)
|
||||||
service = self.api.services.create(body=body)
|
service = self.api.services.create(body=body, namespace=namespace)
|
||||||
LOG.info("Service '{}' is created".format(service.metadata.name))
|
LOG.info("Service '{}' is created".format(service.metadata.name))
|
||||||
return self.api.services.get(name=service.metadata.name)
|
return self.api.services.get(name=service.metadata.name)
|
||||||
|
|
||||||
def check_ds_create(self, body):
|
def check_ds_create(self, body, namespace=None):
|
||||||
"""Check creating k8s DaemonSet
|
"""Check creating k8s DaemonSet
|
||||||
|
|
||||||
:param body: dict, DaemonSet spec
|
:param body: dict, DaemonSet spec
|
||||||
:param k8sclient: K8sCluster object
|
:param namespace: str
|
||||||
:rtype: K8sDaemonSet object
|
:rtype: K8sDaemonSet object
|
||||||
"""
|
"""
|
||||||
LOG.info("Creating DaemonSet in k8s cluster")
|
LOG.info("Creating DaemonSet in k8s cluster")
|
||||||
|
@ -283,7 +283,7 @@ class K8SManager(object):
|
||||||
"DaemonSet spec to create:\n{}".format(
|
"DaemonSet spec to create:\n{}".format(
|
||||||
yaml.dump(body, default_flow_style=False))
|
yaml.dump(body, default_flow_style=False))
|
||||||
)
|
)
|
||||||
ds = self.api.daemonsets.create(body=body)
|
ds = self.api.daemonsets.create(body=body, namespace=namespace)
|
||||||
LOG.info("DaemonSet '{}' is created".format(ds.metadata.name))
|
LOG.info("DaemonSet '{}' is created".format(ds.metadata.name))
|
||||||
return self.api.daemonsets.get(name=ds.metadata.name)
|
return self.api.daemonsets.get(name=ds.metadata.name)
|
||||||
|
|
||||||
|
|
|
@ -245,13 +245,15 @@ NETCHECKER_AGENT_DIR = os.environ.get(
|
||||||
'NETCHECKER_AGENT_DIR', os.path.join(os.getcwd(), 'mcp-netchecker-agent')
|
'NETCHECKER_AGENT_DIR', os.path.join(os.getcwd(), 'mcp-netchecker-agent')
|
||||||
)
|
)
|
||||||
MCP_NETCHECKER_AGENT_IMAGE_REPO = os.environ.get(
|
MCP_NETCHECKER_AGENT_IMAGE_REPO = os.environ.get(
|
||||||
'MCP_NETCHECKER_AGENT_IMAGE_REPO')
|
'MCP_NETCHECKER_AGENT_IMAGE_REPO',
|
||||||
|
'quay.io/l23network/mcp-netchecker-agent')
|
||||||
MCP_NETCHECKER_AGENT_VERSION = os.environ.get(
|
MCP_NETCHECKER_AGENT_VERSION = os.environ.get(
|
||||||
'MCP_NETCHECKER_AGENT_VERSION')
|
'MCP_NETCHECKER_AGENT_VERSION', 'latest')
|
||||||
MCP_NETCHECKER_SERVER_IMAGE_REPO = os.environ.get(
|
MCP_NETCHECKER_SERVER_IMAGE_REPO = os.environ.get(
|
||||||
'MCP_NETCHECKER_SERVER_IMAGE_REPO')
|
'MCP_NETCHECKER_SERVER_IMAGE_REPO',
|
||||||
|
'quay.io/l23network/mcp-netchecker-server')
|
||||||
MCP_NETCHECKER_SERVER_VERSION = os.environ.get(
|
MCP_NETCHECKER_SERVER_VERSION = os.environ.get(
|
||||||
'MCP_NETCHECKER_SERVER_VERSION')
|
'MCP_NETCHECKER_SERVER_VERSION', 'latest')
|
||||||
|
|
||||||
# Settings for AppController testing
|
# Settings for AppController testing
|
||||||
# AC_PATH - path to k8s-AppController repo
|
# AC_PATH - path to k8s-AppController repo
|
||||||
|
|
|
@ -15,18 +15,14 @@
|
||||||
import pytest
|
import pytest
|
||||||
|
|
||||||
import base_test
|
import base_test
|
||||||
import test_netchecker
|
|
||||||
|
|
||||||
from fuel_ccp_tests import logger
|
from fuel_ccp_tests import logger
|
||||||
|
from fuel_ccp_tests.helpers import netchecker
|
||||||
|
|
||||||
LOG = logger.logger
|
LOG = logger.logger
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.usefixtures("check_netchecker_files")
|
|
||||||
@pytest.mark.usefixtures("check_netchecker_images_settings")
|
|
||||||
@pytest.mark.usefixtures("check_calico_images_settings")
|
@pytest.mark.usefixtures("check_calico_images_settings")
|
||||||
class TestFuelCCPCalico(base_test.SystemBaseTest,
|
class TestFuelCCPCalico(base_test.SystemBaseTest):
|
||||||
test_netchecker.TestFuelCCPNetCheckerMixin):
|
|
||||||
"""Test class for Calico network plugin in k8s"""
|
"""Test class for Calico network plugin in k8s"""
|
||||||
|
|
||||||
@pytest.mark.fail_snapshot
|
@pytest.mark.fail_snapshot
|
||||||
|
@ -52,13 +48,13 @@ class TestFuelCCPCalico(base_test.SystemBaseTest,
|
||||||
|
|
||||||
# STEP #2
|
# STEP #2
|
||||||
show_step(2)
|
show_step(2)
|
||||||
self.start_netchecker_server(k8s=k8scluster)
|
netchecker.start_server(k8s=k8scluster)
|
||||||
self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
|
netchecker.wait_running(config.k8s.kube_host, timeout=240)
|
||||||
|
|
||||||
# STEP #3
|
# STEP #3
|
||||||
show_step(3)
|
show_step(3)
|
||||||
self.start_netchecker_agent(underlay, k8scluster)
|
netchecker.start_agent(k8s=k8scluster)
|
||||||
|
|
||||||
# STEP #4
|
# STEP #4
|
||||||
show_step(4)
|
show_step(4)
|
||||||
self.wait_check_network(config.k8s.kube_host, works=True)
|
netchecker.wait_check_network(config.k8s.kube_host, works=True)
|
||||||
|
|
|
@ -14,15 +14,11 @@
|
||||||
|
|
||||||
import os
|
import os
|
||||||
import pytest
|
import pytest
|
||||||
import requests
|
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
from devops.helpers import helpers
|
|
||||||
from k8sclient.client import rest
|
|
||||||
|
|
||||||
import base_test
|
import base_test
|
||||||
from fuel_ccp_tests.helpers import ext
|
from fuel_ccp_tests.helpers import ext
|
||||||
from fuel_ccp_tests.helpers import utils
|
from fuel_ccp_tests.helpers import netchecker
|
||||||
from fuel_ccp_tests import logger
|
from fuel_ccp_tests import logger
|
||||||
from fuel_ccp_tests import settings
|
from fuel_ccp_tests import settings
|
||||||
|
|
||||||
|
@ -40,102 +36,26 @@ class TestFuelCCPNetCheckerMixin:
|
||||||
settings.NETCHECKER_AGENT_DIR, 'netchecker-agent.yaml')
|
settings.NETCHECKER_AGENT_DIR, 'netchecker-agent.yaml')
|
||||||
netchecker_files = (pod_yaml_file, svc_yaml_file, ds_yaml_file)
|
netchecker_files = (pod_yaml_file, svc_yaml_file, ds_yaml_file)
|
||||||
|
|
||||||
def start_netchecker_server(self, k8s):
|
@property
|
||||||
|
def pod_spec(self):
|
||||||
|
if not os.path.isfile(self.pod_yaml_file):
|
||||||
|
return None
|
||||||
with open(self.pod_yaml_file) as pod_conf:
|
with open(self.pod_yaml_file) as pod_conf:
|
||||||
for pod_spec in yaml.load_all(pod_conf):
|
return yaml.load(pod_conf)
|
||||||
for container in pod_spec['spec']['containers']:
|
|
||||||
if container['name'] == 'netchecker-server':
|
|
||||||
container['image'] = '{0}:{1}'.format(
|
|
||||||
settings.MCP_NETCHECKER_SERVER_IMAGE_REPO,
|
|
||||||
settings.MCP_NETCHECKER_SERVER_VERSION)
|
|
||||||
try:
|
|
||||||
if k8s.api.pods.get(name=pod_spec['metadata']['name']):
|
|
||||||
LOG.debug('Network checker server pod {} is '
|
|
||||||
'already running! Skipping resource creation'
|
|
||||||
'.'.format(pod_spec['metadata']['name']))
|
|
||||||
continue
|
|
||||||
except rest.ApiException as e:
|
|
||||||
if e.status == 404:
|
|
||||||
k8s.check_pod_create(body=pod_spec)
|
|
||||||
else:
|
|
||||||
raise e
|
|
||||||
|
|
||||||
|
@property
|
||||||
|
def svc_spec(self):
|
||||||
|
if not os.path.isfile(self.svc_yaml_file):
|
||||||
|
return None
|
||||||
with open(self.svc_yaml_file) as svc_conf:
|
with open(self.svc_yaml_file) as svc_conf:
|
||||||
for svc_spec in yaml.load_all(svc_conf):
|
return yaml.load(svc_conf)
|
||||||
try:
|
|
||||||
if k8s.api.services.get(
|
|
||||||
name=svc_spec['metadata']['name']):
|
|
||||||
LOG.debug('Network checker server service {} is '
|
|
||||||
'already running! Skipping resource creation'
|
|
||||||
'.'.format(svc_spec['metadata']['name']))
|
|
||||||
continue
|
|
||||||
except rest.ApiException as e:
|
|
||||||
if e.status == 404:
|
|
||||||
k8s.check_service_create(body=svc_spec)
|
|
||||||
else:
|
|
||||||
raise e
|
|
||||||
|
|
||||||
def start_netchecker_agent(self, underlay, k8s):
|
|
||||||
# TODO(apanchenko): use python API client here when it will have
|
|
||||||
# TODO(apanchenko): needed functionality (able work with labels)
|
|
||||||
underlay.sudo_check_call(
|
|
||||||
"kubectl get nodes | awk '/Ready/{print $1}' | "
|
|
||||||
"xargs -I {} kubectl label nodes {} netchecker=agent --overwrite",
|
|
||||||
node_name='master')
|
|
||||||
|
|
||||||
|
@property
|
||||||
|
def ds_spec(self):
|
||||||
|
if not os.path.isfile(self.ds_yaml_file):
|
||||||
|
return None
|
||||||
with open(self.ds_yaml_file) as ds_conf:
|
with open(self.ds_yaml_file) as ds_conf:
|
||||||
for daemon_set_spec in yaml.load_all(ds_conf):
|
return [i for i in yaml.load_all(ds_conf)]
|
||||||
for container in (daemon_set_spec['spec']['template']['spec']
|
|
||||||
['containers']):
|
|
||||||
if container['name'] == 'netchecker-agent':
|
|
||||||
container['image'] = '{0}:{1}'.format(
|
|
||||||
settings.MCP_NETCHECKER_AGENT_IMAGE_REPO,
|
|
||||||
settings.MCP_NETCHECKER_AGENT_VERSION)
|
|
||||||
k8s.check_ds_create(body=daemon_set_spec)
|
|
||||||
k8s.wait_ds_ready(dsname=daemon_set_spec['metadata']['name'])
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
@utils.retry(3, requests.exceptions.RequestException)
|
|
||||||
def get_netchecker_status(kube_host_ip, netchecker_pod_port=31081):
|
|
||||||
net_status_url = 'http://{0}:{1}/api/v1/connectivity_check'.format(
|
|
||||||
kube_host_ip, netchecker_pod_port)
|
|
||||||
return requests.get(net_status_url, timeout=5)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def wait_netchecker_running(kube_host_ip, timeout=120, interval=5):
|
|
||||||
helpers.wait_pass(
|
|
||||||
lambda: TestFuelCCPNetChecker.get_netchecker_status(kube_host_ip),
|
|
||||||
timeout=timeout, interval=interval)
|
|
||||||
|
|
||||||
def check_network(self, kube_host_ip, works=True):
|
|
||||||
if works:
|
|
||||||
assert self.get_netchecker_status(kube_host_ip).status_code in \
|
|
||||||
(200, 204)
|
|
||||||
else:
|
|
||||||
assert self.get_netchecker_status(kube_host_ip).status_code == 400
|
|
||||||
|
|
||||||
def wait_check_network(self, kube_host_ip, works=True, timeout=120,
|
|
||||||
interval=5):
|
|
||||||
helpers.wait_pass(
|
|
||||||
lambda: self.check_network(kube_host_ip, works=works),
|
|
||||||
timeout=timeout, interval=interval)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def calico_block_traffic_on_node(underlay, target_node):
|
|
||||||
LOG.info('Blocked traffic to the network checker service from '
|
|
||||||
'containers on node "{}".'.format(target_node))
|
|
||||||
underlay.sudo_check_call(
|
|
||||||
'calicoctl profile calico-k8s-network rule add '
|
|
||||||
'--at=1 outbound deny tcp to ports 8081',
|
|
||||||
node_name=target_node)
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def calico_unblock_traffic_on_node(underlay, target_node):
|
|
||||||
LOG.info('Unblocked traffic to the network checker service from '
|
|
||||||
'containers on node "{}".'.format(target_node))
|
|
||||||
underlay.sudo_check_call(
|
|
||||||
'calicoctl profile calico-k8s-network rule remove outbound --at=1',
|
|
||||||
node_name=target_node)
|
|
||||||
|
|
||||||
|
|
||||||
@pytest.mark.usefixtures("check_netchecker_files")
|
@pytest.mark.usefixtures("check_netchecker_files")
|
||||||
|
@ -147,8 +67,7 @@ class TestFuelCCPNetChecker(base_test.SystemBaseTest,
|
||||||
@pytest.mark.fail_snapshot
|
@pytest.mark.fail_snapshot
|
||||||
@pytest.mark.snapshot_needed
|
@pytest.mark.snapshot_needed
|
||||||
@pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)
|
@pytest.mark.revert_snapshot(ext.SNAPSHOT.k8s_deployed)
|
||||||
def test_k8s_netchecker(self, underlay, k8scluster, config,
|
def test_k8s_netchecker(self, underlay, k8scluster, config, show_step):
|
||||||
show_step):
|
|
||||||
"""Test for deploying an k8s environment with Calico and check
|
"""Test for deploying an k8s environment with Calico and check
|
||||||
connectivity between its networks
|
connectivity between its networks
|
||||||
|
|
||||||
|
@ -174,34 +93,35 @@ class TestFuelCCPNetChecker(base_test.SystemBaseTest,
|
||||||
|
|
||||||
# STEP #2
|
# STEP #2
|
||||||
show_step(2)
|
show_step(2)
|
||||||
self.start_netchecker_server(k8s=k8scluster)
|
netchecker.start_server(k8s=k8scluster, pod_spec=self.pod_spec,
|
||||||
self.wait_netchecker_running(config.k8s.kube_host, timeout=240)
|
svc_spec=self.svc_spec)
|
||||||
|
netchecker.wait_running(config.k8s.kube_host, timeout=240)
|
||||||
|
|
||||||
# STEP #3
|
# STEP #3
|
||||||
show_step(3)
|
show_step(3)
|
||||||
self.wait_check_network(config.k8s.kube_host, works=False)
|
netchecker.wait_check_network(config.k8s.kube_host, works=False)
|
||||||
|
|
||||||
# STEP #4
|
# STEP #4
|
||||||
show_step(4)
|
show_step(4)
|
||||||
self.start_netchecker_agent(underlay, k8scluster)
|
netchecker.start_agent(k8s=k8scluster, ds_spec=self.ds_spec)
|
||||||
|
|
||||||
# STEP #5
|
# STEP #5
|
||||||
show_step(5)
|
show_step(5)
|
||||||
self.wait_check_network(config.k8s.kube_host, works=True)
|
netchecker.wait_check_network(config.k8s.kube_host, works=True)
|
||||||
|
|
||||||
# STEP #6
|
# STEP #6
|
||||||
show_step(6)
|
show_step(6)
|
||||||
target_node = underlay.get_random_node()
|
target_node = underlay.get_random_node()
|
||||||
self.calico_block_traffic_on_node(underlay, target_node)
|
netchecker.calico_block_traffic_on_node(underlay, target_node)
|
||||||
|
|
||||||
# STEP #7
|
# STEP #7
|
||||||
show_step(7)
|
show_step(7)
|
||||||
self.wait_check_network(config.k8s.kube_host, works=False)
|
netchecker.wait_check_network(config.k8s.kube_host, works=False)
|
||||||
|
|
||||||
# STEP #8
|
# STEP #8
|
||||||
show_step(8)
|
show_step(8)
|
||||||
self.calico_unblock_traffic_on_node(underlay, target_node)
|
netchecker.calico_unblock_traffic_on_node(underlay, target_node)
|
||||||
|
|
||||||
# STEP #9
|
# STEP #9
|
||||||
show_step(9)
|
show_step(9)
|
||||||
self.wait_check_network(config.k8s.kube_host, works=True)
|
netchecker.wait_check_network(config.k8s.kube_host, works=True)
|
||||||
|
|
Loading…
Reference in New Issue