Add k8s pod-to-pod connectivity tests for the starlingx pytest framework

Includes:
- added an automated test case for pod-to-pod connectivity
- added pod-related yaml files
- fixed test cases dvr, multiple ports, ping vms, and
  vm meta-data retrieval

Story: 2007406
Task: 39009

Change-Id: Ib20ee48d4a3769da449dc0fe487ea35cd812fb17
Author: Ayyappa Mantri <ayyappa.mantri@windriver.com>
Co-Authored-By: Yvonne Ding <yvonne.ding@windriver.com>
Signed-off-by: Yvonne Ding <yvonne.ding@windriver.com>
Yvonne Ding 2020-03-19 10:19:33 -04:00
parent 208dfed12a
commit a98da79ef4
12 changed files with 362 additions and 6 deletions
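
All of the test-case fixes below follow one pattern: the stx_openstack_required fixture is added to each affected test or fixture signature, presumably so those tests are skipped when the stx-openstack application is not deployed. For example:

    # before
    def test_vm_meta_data_retrieval():

    # after
    def test_vm_meta_data_retrieval(stx_openstack_required):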


@@ -40,7 +40,8 @@ class ProjVar:
        'INSTANCE_BACKING': {},
        'OPENSTACK_DEPLOYED': None,
        'DEFAULT_INSTANCE_BACKING': None,
-       'STX_KEYFILE_PATH': '~/.ssh/id_rsa'
+       'STX_KEYFILE_PATH': '~/.ssh/id_rsa',
        'IPV6_OAM': None,
    }

    @classmethod


@@ -16,6 +16,7 @@ from contextlib import contextmanager
from datetime import datetime
import pexpect
import yaml
from pytest import skip
from consts.auth import Tenant, TestFileServer, HostLinuxUser

@@ -785,3 +786,34 @@ def ssh_to_remote_node(host, username=None, password=None, prompt=None,
    finally:
        if current_host != original_host:
            remote_ssh.close()


def get_yaml_data(filepath):
    """
    Loads a yaml file and returns the parsed data

    Args:
        filepath (str): location of the yaml file to load

    Returns (dict|list):
        the parsed yaml data
    """
    with open(filepath, 'r') as f:
        data = yaml.safe_load(f)
    return data


def write_yaml_data_to_file(data, filename, directory=None):
    """
    Writes data to a file in yaml format

    Args:
        data (dict|list): data to dump to the file
        filename (str): name of the yaml file to create
        directory (str|None): directory to save the file under; defaults
            to the automation LOG_DIR

    Returns (str):
        the location of the yaml file created
    """
    if directory is None:
        directory = ProjVar.get_var('LOG_DIR')
    src_path = "{}/{}".format(directory, filename)
    with open(src_path, 'w') as f:
        yaml.dump(data, f)
    return src_path
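
A minimal usage sketch for the two helpers above (the patched pod name below is illustrative, not part of the commit):

    # Load a manifest shipped with the repo, tweak a field, and write the
    # modified copy to the automation LOG_DIR.
    pod = get_yaml_data("utils/test_files/client_pod.yaml")
    pod['metadata']['name'] = "client-pod-demo"   # hypothetical name
    saved = write_yaml_data_to_file(pod, "client-pod-demo.yaml")
    # saved -> "<LOG_DIR>/client-pod-demo.yaml"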


@@ -622,6 +622,25 @@ def get_pod_value_jsonpath(type_name, jsonpath, namespace=None, con_ssh=None):
    return value


def expose_the_service(deployment_name, type, service_name, namespace=None,
                       con_ssh=None):
    """
    Exposes the service of a deployment

    Args:
        deployment_name (str): name of the deployment to expose
        type (str): "LoadBalancer" or "NodePort"
        service_name (str): name of the service to create
        namespace (str|None): e.g., 'kube-system'
        con_ssh:

    Returns (str):
    """
    args = '{} --type={} --name={}'.format(deployment_name, type, service_name)
    if namespace:
        args += ' --namespace {}'.format(namespace)
    return exec_kube_cmd('expose deployment', args, con_ssh=con_ssh)


def get_nodes(hosts=None, status=None, field='STATUS', exclude=False,
              con_ssh=None, fail_ok=False):
    """


@@ -3687,3 +3687,20 @@ def is_active_controller(host, con_ssh=None,
def is_lowlatency_host(host):
    subfuncs = get_host_values(host=host, fields='subfunctions')[0]
    return 'lowlatency' in subfuncs


def get_system_iplist():
    """
    Returns the system OAM ip list, accounting for ipv4/ipv6 and for
    simplex vs non-simplex systems

    Returns (list): the system ipv4/ipv6 OAM ips; ipv6 addresses are
        wrapped in brackets so they can be embedded in urls
    """
    ip = []
    out = get_oam_values()
    if is_aio_simplex():
        ip.append(out["oam_ip"])
    else:
        ip.extend([out["oam_floating_ip"], out["oam_c0_ip"], out["oam_c1_ip"]])
    if ProjVar.get_var('IPV6_OAM'):
        iplist = ["[{}]".format(i) for i in ip]
        ip = iplist
    return ip
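
The bracket wrapping is what lets the ipv6 addresses be embedded in urls later on (e.g. in test_host_to_service_connection); an illustrative sketch with made-up addresses:

    # ipv6 literals must be bracketed inside urls (RFC 3986).
    ips = ["[fd00::a]", "[fd00::b]"]   # shape of get_system_iplist() output for ipv6
    urls = ["http://{}:30080".format(i) for i in ips]
    # -> ['http://[fd00::a]:30080', 'http://[fd00::b]:30080']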


@@ -21,7 +21,7 @@ result_ = None

@fixture(scope='module')
-def router_info(request):
+def router_info(request, stx_openstack_required):
    global result_
    result_ = False


@@ -89,7 +89,7 @@ def _boot_multiports_vm(flavor, mgmt_net_id, vifs, net_id, net_type, base_vm,
class TestMutiPortsBasic:
    @fixture(scope='class')
-   def base_setup(self):
+   def base_setup(self, stx_openstack_required):
        flavor_id = nova_helper.create_flavor(name='dedicated')[1]
        ResourceCleanup.add('flavor', flavor_id, scope='class')

@@ -209,7 +209,7 @@ class TestMutiPortsBasic:
class TestMutiPortsPCI:
    @fixture(scope='class')
-   def base_setup_pci(self):
+   def base_setup_pci(self, stx_openstack_required):
        LOG.fixture_step(
            "(class) Get an internal network that supports both pci-sriov and "
            "pcipt vif to boot vm")


@@ -53,7 +53,7 @@ def _compose_nics(vifs, net_ids, image_id, guest_os):
                 marks=mark.priorities('cpe_sanity', 'sanity', 'sx_sanity')),
        ('ubuntu_14', 'virtio', 'virtio'),
    ], ids=id_gen)
-def test_ping_between_two_vms(guest_os, vm1_vifs, vm2_vifs):
+def test_ping_between_two_vms(stx_openstack_required, guest_os, vm1_vifs, vm2_vifs):
    """
    Ping between two vms with given vif models


@@ -0,0 +1,234 @@
#
# Copyright (c) 2020 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
import copy

from pytest import mark, fixture

from utils.tis_log import LOG
from utils import rest
from consts.proj_vars import ProjVar
from consts.auth import HostLinuxUser
from keywords import system_helper, kube_helper, common


@fixture(scope="class")
def deploy_test_pods(request):
"""
Fixture to deploy the server app,client app and returns serverips & client pods
- Label the nodes and add node selector to the deployment files
if not simplex system
- Copy the deployment files from localhost to active controller
- Deploy server pod
- Deploy client pods
- Get the server pods and client pods
- Get the server pods and client pods status before test begins
- Delete the service
- Delete the server pod deployment
- Delete the client pods
- Remove the labels on the nodes if not simplex
"""
server_dep_file = "server_pod.yaml"
home_dir = HostLinuxUser.get_home()
service_name = "test-service"
client_pod1_name = "client-pod1"
client_pod2_name = "client-pod2"
server_dep_file_path = "utils/test_files/server_pod_deploy.yaml"
client_pod_template_file_path = "utils/test_files/client_pod.yaml"
server_pod_dep_data = common.get_yaml_data(server_dep_file_path)
client_pod1_data = common.get_yaml_data(client_pod_template_file_path)
client_pod2_data = copy.deepcopy(client_pod1_data)
client_pod1_data['metadata']['name'] = client_pod1_name
client_pod2_data['metadata']['name'] = client_pod2_name
deployment_name = server_pod_dep_data['metadata']['name']
computes = system_helper.get_hypervisors(
operational="enabled", availability="available")
if len(computes) > 1:
LOG.fixture_step("Label the nodes and add node selector to the deployment files\
if not simplex system")
kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(
computes[0]), args="test=server")
kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(
computes[1]), args="test=client")
server_pod_dep_data['spec']['template']['spec']['nodeSelector'] = {
'test': 'server'}
client_pod1_data['spec']['nodeSelector'] = {'test': 'server'}
client_pod2_data['spec']['nodeSelector'] = {'test': 'client'}
server_pod_path = common.write_yaml_data_to_file(
server_pod_dep_data, server_dep_file)
client_pod1_path = common.write_yaml_data_to_file(
client_pod1_data, "{}.yaml".format(client_pod1_name))
client_pod2_path = common.write_yaml_data_to_file(
client_pod2_data, "{}.yaml".format(client_pod2_name))
LOG.fixture_step(
"Copy the deployment files from localhost to active controller")
common.scp_from_localhost_to_active_controller(
source_path=server_pod_path, dest_path=home_dir)
common.scp_from_localhost_to_active_controller(
source_path=client_pod1_path, dest_path=home_dir)
common.scp_from_localhost_to_active_controller(
source_path=client_pod2_path, dest_path=home_dir)
LOG.fixture_step("Deploy server pods {}".format(server_dep_file))
kube_helper.exec_kube_cmd(sub_cmd="create -f ", args=server_dep_file)
LOG.fixture_step("Deploy client pod {}.yaml & client pod {}.yaml".format(
client_pod1_name, client_pod2_name))
kube_helper.exec_kube_cmd(sub_cmd="create -f ",
args="{}.yaml".format(client_pod1_name))
kube_helper.exec_kube_cmd(sub_cmd="create -f ",
args="{}.yaml".format(client_pod2_name))
LOG.fixture_step("Get the server pods and client pods")
server_pods = kube_helper.get_pods(labels="server=pod-to-pod")
client_pods = kube_helper.get_pods(labels="client=pod-to-pod")
def teardown():
LOG.fixture_step("Delete the service {}".format(service_name))
kube_helper.exec_kube_cmd(
sub_cmd="delete service ", args=service_name)
LOG.fixture_step("Delete the deployment {}".format(deployment_name))
kube_helper.exec_kube_cmd(
sub_cmd="delete deployment ", args=deployment_name)
LOG.fixture_step("Delete the client pods {} & {}".format(
client_pod1_name, client_pod2_name))
kube_helper.delete_resources(labels="client=pod-to-pod")
if len(computes) > 1:
LOG.fixture_step("Remove the labels on the nodes if not simplex")
kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(
computes[0]), args="test-")
kube_helper.exec_kube_cmd(sub_cmd="label nodes {}".format(
computes[1]), args="test-")
request.addfinalizer(teardown)
LOG.fixture_step("Get the server pods and client pods status before test begins")
kube_helper.wait_for_pods_status(
pod_names=server_pods+client_pods, namespace="default")
return get_pod_ips(server_pods), client_pods, deployment_name, service_name


def get_pod_ips(pods):
    """
    Returns the pod ips for the given pods

    Args:
        pods (list): list of pod names

    Returns (list): the pod ips
    """
    pod_ips = []
    for i in pods:
        pod_ips.append(kube_helper.get_pod_value_jsonpath(
            "pod {}".format(i), "{.status.podIP}"))
    return pod_ips


@mark.platform_sanity
@mark.dc_subcloud
class TestPodtoPod:

    def test_pod_to_pod_connection(self, deploy_test_pods):
        """
        Verify ping between pods

        Args:
            deploy_test_pods (fixture): returns server_ips, client_pods,
                deployment_name and service_name

        Setup:
            - Label the nodes and add node selectors to the deployment
              files if not a simplex system
            - Copy the deployment files from localhost to the active
              controller
            - Deploy the server pods
            - Deploy the client pods

        Steps:
            - Ping each server pod ip from each client pod

        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex
        """
        server_ips, client_pods, _, _ = deploy_test_pods
        for client_pod in client_pods:
            for ip in server_ips:
                LOG.tc_step("Ping the server pod ip {} from the client "
                            "pod {}".format(ip, client_pod))
                cmd = "ping -c 3 {} -w 5".format(ip)
                code, _ = kube_helper.exec_cmd_in_container(
                    cmd=cmd, pod=client_pod)
                assert code == 0

    def test_pod_to_service_connection(self, deploy_test_pods):
        """
        Verify client pod access to the service's multiple endpoints

        Args:
            deploy_test_pods (fixture): returns server_ips, client_pods,
                deployment_name and service_name

        Setup:
            - Label the nodes and add node selectors to the deployment
              files if not a simplex system
            - Copy the deployment files from localhost to the active
              controller
            - Deploy the server pods
            - Deploy the client pods

        Steps:
            - Curl each server pod ip from each client pod

        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex
        """
        server_ips, client_pods, _, _ = deploy_test_pods
        for client_pod in client_pods:
            for ip in server_ips:
                if ProjVar.get_var('IPV6_OAM'):
                    ip = "[{}]".format(ip)
                cmd = "curl -Is {}:8080".format(ip)
                LOG.tc_step("Curl ({}) the server pod ip {} from the client "
                            "pod {}".format(cmd, ip, client_pod))
                code, _ = kube_helper.exec_cmd_in_container(
                    cmd=cmd, pod=client_pod)
                assert code == 0

    def test_host_to_service_connection(self, deploy_test_pods):
        """
        Verify the service connectivity from an external network

        Args:
            deploy_test_pods (fixture): returns server_ips, client_pods,
                deployment_name and service_name

        Setup:
            - Label the nodes and add node selectors to the deployment
              files if not a simplex system
            - Copy the deployment files from localhost to the active
              controller
            - Deploy the server pods
            - Deploy the client pods

        Steps:
            - Expose the service with NodePort
            - Check the service access from the local host

        Teardown:
            - Delete the service
            - Delete the server pod deployment
            - Delete the client pods
            - Remove the labels on the nodes if not simplex
        """
        _, _, deploy_name, service_name = deploy_test_pods
        LOG.tc_step("Expose the service {} with NodePort".format(service_name))
        kube_helper.expose_the_service(
            deployment_name=deploy_name, type="NodePort",
            service_name=service_name)
        node_port = kube_helper.get_pod_value_jsonpath(
            "service {}".format(service_name), "{.spec.ports[0].nodePort}")
        for i in system_helper.get_system_iplist():
            url = "http://{}:{}".format(i, node_port)
            LOG.tc_step(
                "Check the service access {} from local host".format(url))
            # check_url returns a bool; assert it so an inaccessible url
            # fails the test
            assert rest.check_url(url)
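
For orientation, the NodePort check above boils down to the following (a hedged sketch with made-up values, using plain requests in place of the framework helpers):

    import requests

    node_port = 31234                  # read from {.spec.ports[0].nodePort}
    oam_ip = "10.10.10.2"              # one entry from get_system_iplist()
    url = "http://{}:{}".format(oam_ip, node_port)
    assert requests.get(url, timeout=10).status_code == 200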


@@ -6,7 +6,7 @@ from consts.stx import METADATA_SERVER

@mark.sanity
-def test_vm_meta_data_retrieval():
+def test_vm_meta_data_retrieval(stx_openstack_required):
    """
    VM meta-data retrieval


@@ -195,3 +195,21 @@ class Rest:
                             headers=headers, data=json_data,
                             verify=verify)
        return r.status_code, r.json()


def check_url(url, fail=False, secure=False):
    """
    Checks access to the given url and returns True or False based on the
    fail condition

    Args:
        url (str): url to check the access to
        fail (bool): when True, the url is expected to be inaccessible
        secure (bool): whether to verify the server certificate; default
            is False so that both http and https urls can be checked

    Returns (bool):
        True if the url behaved as expected (accessible when fail=False,
        inaccessible when fail=True), else False
    """
    try:
        r = requests.get(url, timeout=10, verify=secure)
        return (r.status_code == 200) != fail
    except requests.exceptions.RequestException:
        return fail
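
A quick usage sketch for check_url (the urls below are illustrative):

    # A reachable endpoint passes with the defaults; an endpoint that is
    # expected to be down passes with fail=True.
    assert check_url("http://10.10.10.2:31234")
    assert check_url("http://10.10.10.2:9999", fail=True)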


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
  name: client-pod
  namespace: default
  labels:
    client: pod-to-pod
spec:
  containers:
  - image: mantri425/wind-test:latest
    command: ["/bin/sh", "-c"]
    args: ["sleep 60m"]
    imagePullPolicy: IfNotPresent
    name: client-container
  restartPolicy: Always


@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: server-pod-dep
spec:
  selector:
    matchLabels:
      server: pod-to-pod
  replicas: 2
  template:
    metadata:
      labels:
        server: pod-to-pod
    spec:
      containers:
      - name: server-container
        image: gcr.io/google-samples/node-hello:1.0
        ports:
        - containerPort: 8080
          protocol: TCP