Merge "Adding file resources used by system test TCs. Adding first system test TC (deploy, scale and remove deployments). Adding new object for k8s get deployments. Updating existing k8s objects to support "namespace" parameter."

This commit is contained in:
Zuul
2025-08-29 12:30:14 +00:00
committed by Gerrit Code Review
50 changed files with 2110 additions and 27 deletions

View File

@@ -3,6 +3,7 @@ from framework.logging.automation_logger import get_logger
from framework.validation.validation import validate_str_contains
from keywords.base_keyword import BaseKeyword
from keywords.docker.login.docker_login_keywords import DockerLoginKeywords
import re
class DockerLoadImageKeywords(BaseKeyword):
@@ -25,12 +26,19 @@ class DockerLoadImageKeywords(BaseKeyword):
image_file_name (): the image file name
Returns:
The loaded image name (str)
"""
output = self.ssh_connection.send_as_sudo(f"docker load -i {image_file_name}")
string_output = "".join(output)
validate_str_contains(string_output, "Loaded image", "Image")
for line in output:
match = re.search(r"Loaded image:\s*(\S+)", line)
if match:
return match.group(1)
def tag_docker_image_for_registry(self, image_name: str, tag_name: str, registry: Registry):
"""
Tags the docker image for the registry
@@ -56,7 +64,6 @@ class DockerLoadImageKeywords(BaseKeyword):
registry (): the registry
Returns:
"""
DockerLoginKeywords(self.ssh_connection).login(registry.get_user_name(), registry.get_password(), registry.get_registry_url())
self.ssh_connection.send_as_sudo(f'docker push {registry.get_registry_url()}/{tag_name}')
self.ssh_connection.send_as_sudo(f'docker push {registry.get_registry_url()}/{tag_name}')

View File

@@ -13,7 +13,7 @@ class KubectlDeleteDeploymentsKeywords(BaseKeyword):
"""
self.ssh_connection = ssh_connection
def delete_deployment(self, deployment_name: str) -> str:
def delete_deployment(self, deployment_name: str, namespace: str = None) -> str:
"""
Deletes the given deployment
Args:
@@ -22,7 +22,10 @@ class KubectlDeleteDeploymentsKeywords(BaseKeyword):
Returns: the output
"""
output = self.ssh_connection.send(export_k8s_config(f"kubectl delete deployment {deployment_name}"))
cmd = f"kubectl delete deployment {deployment_name}"
if namespace:
cmd = f"{cmd} -n {namespace}"
output = self.ssh_connection.send(export_k8s_config(cmd))
self.validate_success_return_code(self.ssh_connection)
return output

View File

@@ -0,0 +1,53 @@
from keywords.base_keyword import BaseKeyword
from keywords.k8s.k8s_command_wrapper import export_k8s_config
from keywords.k8s.deployments.object.kubectl_get_deployments_output import KubectlGetDeploymentOutput
class KubectlGetDeploymentsKeywords(BaseKeyword):
    """
    Keywords for running "kubectl get deployment" commands over an SSH connection.
    """

    def __init__(self, ssh_connection):
        """
        Constructor

        Args:
            ssh_connection: the SSH connection used to run the kubectl commands
        """
        self.ssh_connection = ssh_connection

    def get_deployment(self, deployment_name: str) -> KubectlGetDeploymentOutput:
        """
        Get the deployment with the specified name.

        Args:
            deployment_name (str): the deployment name

        Returns:
            KubectlGetDeploymentOutput: the parsed "kubectl get deployment <name>" output
        """
        kubectl_get_deployments_output = self.ssh_connection.send(export_k8s_config(f"kubectl get deployment {deployment_name}"))
        self.validate_success_return_code(self.ssh_connection)
        return KubectlGetDeploymentOutput(kubectl_get_deployments_output)

    def get_deployments(self, namespace: str = None) -> KubectlGetDeploymentOutput:
        """
        Get the deployments in the given namespace.

        If no namespace is given, kubectl queries the current-context (default)
        namespace — NOT all namespaces (that would require --all-namespaces).

        Args:
            namespace (str, optional): the namespace to query

        Returns:
            KubectlGetDeploymentOutput: the parsed "kubectl get deployment" output
        """
        cmd = "kubectl get deployment"
        if namespace:
            cmd = f"{cmd} -n {namespace}"
        kubectl_get_deployments_output = self.ssh_connection.send(export_k8s_config(cmd))
        self.validate_success_return_code(self.ssh_connection)
        return KubectlGetDeploymentOutput(kubectl_get_deployments_output)

View File

@@ -0,0 +1,32 @@
from keywords.base_keyword import BaseKeyword
from keywords.k8s.k8s_command_wrapper import export_k8s_config
class KubectlScaleDeploymentsKeywords(BaseKeyword):
    """
    Keyword class for scaling deployments
    """

    def __init__(self, ssh_connection):
        """
        Constructor

        Args:
            ssh_connection: the SSH connection used to run the kubectl commands
        """
        self.ssh_connection = ssh_connection

    def scale_deployment(self, deployment_name: str, replicas: int, namespace: str = None) -> str:
        """
        Scales the given deployment to the specified number of replicas.

        Args:
            deployment_name (str): the deployment name
            replicas (int): number of replicas
            namespace (str): the namespace

        Returns: the output of the scale command
        """
        # Build the namespace suffix up front so the command is assembled in one place.
        ns_suffix = f" -n {namespace}" if namespace else ""
        scale_cmd = f"kubectl scale deployment {deployment_name} --replicas={replicas}{ns_suffix}"
        output = self.ssh_connection.send(export_k8s_config(scale_cmd))
        self.validate_success_return_code(self.ssh_connection)
        return output

View File

@@ -0,0 +1,60 @@
class KubectlDeploymentObject:
    """
    Holds the attributes of one row of 'kubectl get deployment' output.
    """

    def __init__(self, name: str):
        """
        Constructor

        Args:
            name (str): the deployment name (NAME column)
        """
        self.name = name
        # Remaining columns are optional and filled in via the setters below.
        self.ready = None
        self.up_to_date = None
        self.available = None
        self.age = None

    def get_name(self) -> str:
        """
        Getter for NAME entry

        Returns: The name of the deployment."""
        return self.name

    def set_ready(self, ready: str):
        """Setter for the READY entry."""
        self.ready = ready

    def get_ready(self) -> str:
        """
        Getter for READY entry

        Returns: The readiness status of the deployment.
        """
        return self.ready

    def set_up_to_date(self, up_to_date: str):
        """Setter for the UP-TO-DATE entry."""
        self.up_to_date = up_to_date

    def get_up_to_date(self) -> str:
        """
        Getter for UP-TO-DATE entry

        Returns: The up-to-date status of the deployment.
        """
        return self.up_to_date

    def set_available(self, available: str):
        """Setter for the AVAILABLE entry."""
        self.available = available

    def get_available(self) -> str:
        """
        Getter for AVAILABLE entry

        Returns: The availability status of the deployment.
        """
        return self.available

    def set_age(self, age: str):
        """Setter for the AGE entry."""
        self.age = age

    def get_age(self) -> str:
        """
        Getter for AGE entry

        Returns: The age of the deployment.
        """
        return self.age

    def __repr__(self):
        """Debug representation listing every tracked column."""
        return (
            f"<KubectlDeploymentObject name={self.name} ready={self.ready} "
            f"up_to_date={self.up_to_date} available={self.available} age={self.age}>"
        )

View File

@@ -0,0 +1,59 @@
from keywords.k8s.deployments.object.kubectl_deployments_object import KubectlDeploymentObject
from keywords.k8s.deployments.object.kubectl_get_deployments_table_parser import KubectlGetDeploymentTableParser
class KubectlGetDeploymentOutput:
    """
    Parsed output of a "kubectl get deployment" command.
    """

    def __init__(self, kubectl_get_deployment_output: str):
        """
        Constructor

        Args:
            kubectl_get_deployment_output (str): Raw string output from running a "kubectl get deployment" command.
        """
        self.kubectl_deployments: [KubectlDeploymentObject] = []
        parser = KubectlGetDeploymentTableParser(kubectl_get_deployment_output)
        output_values_list = parser.get_output_values_list()
        for deployment_dict in output_values_list:
            # A row without a NAME column cannot be represented; skip it.
            if "NAME" not in deployment_dict:
                continue
            deployment = KubectlDeploymentObject(deployment_dict["NAME"])
            if "READY" in deployment_dict:
                deployment.set_ready(deployment_dict["READY"])
            if "UP-TO-DATE" in deployment_dict:
                deployment.set_up_to_date(deployment_dict["UP-TO-DATE"])
            if "AVAILABLE" in deployment_dict:
                deployment.set_available(deployment_dict["AVAILABLE"])
            if "AGE" in deployment_dict:
                deployment.set_age(deployment_dict["AGE"])
            self.kubectl_deployments.append(deployment)

    def get_deployments(self):
        """
        This function will get the list of all deployments available.

        Returns: List of KubectlDeploymentObjects
        """
        return self.kubectl_deployments

    def is_deployment(self, deployment_name: str) -> bool:
        """
        Checks whether a deployment with the given name is present in this output.

        Args:
            deployment_name (str): The name of the deployment of interest.

        Returns:
            bool: True if a deployment with that name was parsed, False otherwise.
        """
        # any() replaces a fragile trailing for/else: with the else misplaced on the
        # if, the loop would have returned False after inspecting only the first row.
        return any(dep.get_name() == deployment_name for dep in self.kubectl_deployments)

View File

@@ -0,0 +1,22 @@
from keywords.k8s.k8s_table_parser_base import K8sTableParserBase
class KubectlGetDeploymentTableParser(K8sTableParserBase):
    """
    Class for parsing the output of "kubectl get deployment" commands.
    """

    def __init__(self, k8s_output):
        """
        Constructor

        Args:
            k8s_output: The raw String output of a kubernetes command that returns a table.
        """
        super().__init__(k8s_output)
        # Column headers that can appear in "kubectl get deployment" output.
        self.possible_headers = ["NAME", "READY", "UP-TO-DATE", "AVAILABLE", "AGE"]

View File

@@ -19,17 +19,18 @@ class KubectlApplyPodsKeywords(BaseKeyword):
"""
self.ssh_connection = ssh_connection
def apply_from_yaml(self, yaml_file: str) -> None:
def apply_from_yaml(self, yaml_file: str, namespace: str = None) -> None:
"""
Applies a pod yaml config
Args:
yaml_file (str): the yaml file
namespace (str, optional): the namespace to apply the yaml to
Returns: None
"""
self.ssh_connection.send(export_k8s_config(f"kubectl apply -f {yaml_file}"))
ns_arg = f"-n {namespace}" if namespace else ""
self.ssh_connection.send(export_k8s_config(f"kubectl apply -f {yaml_file} {ns_arg}"))
self.validate_success_return_code(self.ssh_connection)
def fail_apply_from_yaml(self, yaml_file: str) -> None:

View File

@@ -16,7 +16,7 @@ class KubectlDeleteServiceKeywords(BaseKeyword):
"""
self.ssh_connection = ssh_connection
def delete_service(self, service_name: str) -> str:
def delete_service(self, service_name: str, namespace: str = None) -> str:
"""
Deletes the given service
Args:
@@ -25,7 +25,10 @@ class KubectlDeleteServiceKeywords(BaseKeyword):
Returns: the output of the cmd
"""
output = self.ssh_connection.send(export_k8s_config(f"kubectl delete service {service_name}"))
cmd = f"kubectl delete service {service_name}"
if namespace:
cmd = f"{cmd} -n {namespace}"
output = self.ssh_connection.send(export_k8s_config(cmd))
self.validate_success_return_code(self.ssh_connection)
return output

View File

@@ -1,7 +1,7 @@
from keywords.base_keyword import BaseKeyword
from keywords.k8s.k8s_command_wrapper import export_k8s_config
from keywords.k8s.service.object.kubectl_get_service_output import KubectlGetServiceOutput
from keywords.k8s.service.object.kubectl_service_object import KubectlServiceObject
from keywords.k8s.service.object.kubectl_get_service_output import KubectlGetServicesOutput
from keywords.k8s.service.object.kubectl_service_object import KubectlServicesObject
class KubectlGetServiceKeywords(BaseKeyword):
@@ -17,7 +17,7 @@ class KubectlGetServiceKeywords(BaseKeyword):
"""
self.ssh_connection = ssh_connection
def get_service(self, service_name: str) -> KubectlServiceObject:
def get_service(self, service_name: str) -> KubectlServicesObject:
"""
Gets the service
Args:
@@ -28,9 +28,27 @@ class KubectlGetServiceKeywords(BaseKeyword):
"""
kubectl_get_service_output = self.ssh_connection.send(export_k8s_config(f"kubectl get service {service_name}"))
self.validate_success_return_code(self.ssh_connection)
service_list_output = KubectlGetServiceOutput(kubectl_get_service_output)
service_list_output = KubectlGetServicesOutput(kubectl_get_service_output)
return service_list_output.get_service(service_name)
def get_services(self, namespace: str = None) -> KubectlServicesObject:
"""
Gets the services from a namespace
Args:
namespace (): the namespace
Returns: KubectlServiceObject
"""
cmd = f"kubectl get services"
if namespace:
cmd = f"{cmd} -n {namespace}"
kubectl_get_services_output = self.ssh_connection.send(export_k8s_config(cmd))
self.validate_success_return_code(self.ssh_connection)
services_list_output = KubectlGetServicesOutput(kubectl_get_services_output)
return services_list_output
def get_service_node_port(self, service_name) -> str:
jsonpath = 'jsonpath="{.spec.ports[0].nodePort}"'

View File

@@ -1,13 +1,13 @@
from keywords.k8s.service.object.kubectl_get_service_table_parser import KubectlGetServiceTableParser
from keywords.k8s.service.object.kubectl_service_object import KubectlServiceObject
from keywords.k8s.service.object.kubectl_get_service_table_parser import KubectlGetServicesTableParser
from keywords.k8s.service.object.kubectl_service_object import KubectlServicesObject
class KubectlGetServiceOutput:
class KubectlGetServicesOutput:
"""
Class for output of the get service command
"""
def __init__(self, kubectl_get_service_output: str):
def __init__(self, kubectl_get_services_output: str):
"""
Constructor
@@ -16,16 +16,16 @@ class KubectlGetServiceOutput:
"""
self.kubectl_service: [KubectlServiceObject] = []
kubectl_get_service_table_parser = KubectlGetServiceTableParser(kubectl_get_service_output)
output_values_list = kubectl_get_service_table_parser.get_output_values_list()
self.kubectl_services: [KubectlServicesObject] = []
kubectl_get_services_table_parser = KubectlGetServicesTableParser(kubectl_get_services_output)
output_values_list = kubectl_get_services_table_parser.get_output_values_list()
for service_dict in output_values_list:
if 'NAME' not in service_dict:
raise ValueError(f"There is no NAME associated with the service: {service_dict}")
service = KubectlServiceObject(service_dict['NAME'])
service = KubectlServicesObject(service_dict['NAME'])
if 'TYPE' in service_dict:
service.set_type(service_dict['TYPE'])
@@ -42,19 +42,31 @@ class KubectlGetServiceOutput:
if 'PORT(S)' in service_dict:
service.set_ports(service_dict['PORT(S)'])
self.kubectl_service.append(service)
self.kubectl_services.append(service)
def get_service(self, service_name) -> KubectlServiceObject:
def get_service(self, service_name) -> KubectlServicesObject:
"""
This function will get the service with the name specified from this get_service_output.
Args:
service_name: The name of the service of interest.
Returns: KubectlServiceObject
Returns: KubectlServicesObject
"""
for service_name_object in self.kubectl_service:
for service_name_object in self.kubectl_services:
if service_name_object.get_name() == service_name:
return service_name_object
else:
raise ValueError(f"There is no service with the name {service_name}.")
def get_services(self) -> KubectlServicesObject:
"""
This function will get the services within the namespace specified from this kubectl_get_service_keywords.
Args:
namespace: The namespace of the services of interest.
Returns: KubectlServicesObject
"""
return self.kubectl_services

View File

@@ -1,7 +1,7 @@
from keywords.k8s.k8s_table_parser_base import K8sTableParserBase
class KubectlGetServiceTableParser(K8sTableParserBase):
class KubectlGetServicesTableParser(K8sTableParserBase):
"""
Class for parsing the output of "kubectl get service" commands.
"""

View File

@@ -1,4 +1,4 @@
class KubectlServiceObject:
class KubectlServicesObject:
"""
Class to hold attributes of a 'kubectl get service' entry.
"""

View File

@@ -0,0 +1,33 @@
# Deployment "besteffort-large": one best-effort pod (empty resources block, so no
# requests/limits) running centos/tools from the local registry with an idle sleep
# loop; mounts host path /home/sysadmin at /etc/dummy.
# NOTE(review): YAML nesting indentation was lost in this diff rendering — restore
# the original indentation before applying; as-is this is not valid YAML.
apiVersion: apps/v1
kind: Deployment
metadata:
name: besteffort-large
labels:
app: besteffort-large
spec:
replicas: 1
selector:
matchLabels:
app: besteffort-large
template:
metadata:
labels:
app: besteffort-large
spec:
containers:
- name: besteffort-large
image: registry.local:9001/centos/tools:latest
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
volumeMounts:
- name: dummy
mountPath: /etc/dummy
resources: {}
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: dummy
hostPath:
path: "/home/sysadmin"

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: burstable-stress-large
labels:
app: burstable-stress-large
spec:
replicas: 1
selector:
matchLabels:
app: burstable-stress-large
template:
metadata:
labels:
app: burstable-stress-large
spec:
containers:
- name: burstable-stress-large
image: registry.local:9001/alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--random", "5", "--metrics-brief", "--timeout", "1h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "25m"
memory: "1Gi"
limits:
cpu: "50m"
memory: "5Gi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-stress-large
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-stress-large
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-stress-large
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-stress-large
resource: limits.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guaranteed-fio-large
labels:
app: guaranteed-fio-large
spec:
replicas: 1
selector:
matchLabels:
app: guaranteed-fio-large
template:
metadata:
labels:
app: guaranteed-fio-large
spec:
containers:
- name: guaranteed-fio-large
image: registry.local:9001/datawiseio/fio:latest
imagePullPolicy: IfNotPresent
command:
- /bin/bash
- -ec
- |
STAMP=$(date +'%F_%H%M%S')
MY_CPUS=$(awk '/Cpus_allowed_list:/ {print $2}' < /proc/self/status)
/usr/bin/fio \
--name=k8sfio \
--ioengine=cpuio --cpuload=80 --cpuchunks=1000
--numjobs=$(REQ_CPU) \
--cpus_allowed=$MY_CPUS --cpus_allowed_policy=split \
--runtime=10m --time_based --log_avg_msec=1000 \
--output=/output/$(POD_NAME)-$(NODE_NAME)-$STAMP.log
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "500m"
memory: "1Gi"
limits:
cpu: "500m"
memory: "1Gi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: guaranteed-fio-large
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: guaranteed-fio-large
resource: requests.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/"

View File

@@ -0,0 +1,48 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: huge-1g-large
annotations:
k8s.v1.cni.cncf.io/networks: sriov-data0
labels:
app: huge-1g-large
spec:
replicas: 1
selector:
matchLabels:
app: huge-1g-large
template:
metadata:
labels:
app: huge-1g-large
spec:
containers:
- name: huge-1g-large
image: registry.local:9001/centos/tools:latest
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
volumeMounts:
- name: hugepage
mountPath: /hugepages
- name: dummy
mountPath: /etc/dummy
resources:
requests:
cpu: 1
memory: "1Gi"
hugepages-1Gi: 1Gi
limits:
cpu: 1
memory: "1Gi"
hugepages-1Gi: 1Gi
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: hugepage
emptyDir:
medium: HugePages
- hostPath:
path: "/home/sysadmin"
name: dummy

View File

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sriov-large
annotations:
k8s.v1.cni.cncf.io/networks: sriov-data0
labels:
app: sriov-large
spec:
replicas: 1
selector:
matchLabels:
app: sriov-large
template:
metadata:
labels:
app: sriov-large
restart-on-reboot: 'true'
spec:
containers:
- name: sriov-large
image: registry.local:9001/centos/tools:latest
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
volumeMounts:
- name: dummy
mountPath: /etc/dummy
resources:
requests:
cpu: 1
memory: "1Gi"
intel.com/pci_sriov_net_group0_data0: '1'
limits:
cpu: 1
memory: "1Gi"
intel.com/pci_sriov_net_group0_data0: '1'
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: dummy
hostPath:
path: "/home/sysadmin"

View File

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: besteffort-mixed
labels:
app: besteffort-mixed
spec:
replicas: 1
selector:
matchLabels:
app: besteffort-mixed
template:
metadata:
labels:
app: besteffort-mixed
spec:
containers:
- name: besteffort-mixed
image: registry.local:9001/centos/tools:latest
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
volumeMounts:
- name: dummy
mountPath: /etc/dummy
resources: {}
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: dummy
hostPath:
path: "/home/sysadmin"

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: burstable-stress-mixed
labels:
app: burstable-stress-mixed
spec:
replicas: 1
selector:
matchLabels:
app: burstable-stress-mixed
template:
metadata:
labels:
app: burstable-stress-mixed
spec:
containers:
- name: burstable-stress-mixed
image: registry.local:9001/alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--random", "5", "--metrics-brief", "--timeout", "1h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "25m"
memory: "200Mi"
limits:
cpu: "50m"
memory: "200Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-stress-mixed
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-stress-mixed
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-stress-mixed
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-stress-mixed
resource: limits.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guaranteed-fio-mixed
labels:
app: guaranteed-fio-mixed
spec:
replicas: 1
selector:
matchLabels:
app: guaranteed-fio-mixed
template:
metadata:
labels:
app: guaranteed-fio-mixed
spec:
containers:
- name: guaranteed-fio-mixed
image: registry.local:9001/datawiseio/fio:latest
imagePullPolicy: IfNotPresent
command:
- /bin/bash
- -ec
- |
STAMP=$(date +'%F_%H%M%S')
MY_CPUS=$(awk '/Cpus_allowed_list:/ {print $2}' < /proc/self/status)
/usr/bin/fio \
--name=k8sfio \
--ioengine=cpuio --cpuload=80 --cpuchunks=1000
--numjobs=$(REQ_CPU) \
--cpus_allowed=$MY_CPUS --cpus_allowed_policy=split \
--runtime=10m --time_based --log_avg_msec=1000 \
--output=/output/$(POD_NAME)-$(NODE_NAME)-$STAMP.log
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "500m"
memory: "1Gi"
limits:
cpu: "500m"
memory: "1Gi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: guaranteed-fio-mixed
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: guaranteed-fio-mixed
resource: requests.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/"

View File

@@ -0,0 +1,33 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: huge-1g-mixed
labels:
app: huge-1g-mixed
spec:
replicas: 1
selector:
matchLabels:
app: huge-1g-mixed
template:
metadata:
labels:
app: huge-1g-mixed
spec:
containers:
- image: registry.local:9001/gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
name: huge-1g-mixed
ports:
- containerPort: 8080
resources:
requests:
cpu: 1
memory: "1Gi"
hugepages-1Gi: 1Gi
limits:
cpu: 1
memory: "1Gi"
hugepages-1Gi: 1Gi
restartPolicy: Always
imagePullSecrets:
- name: regcred

View File

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sriov-mixed
annotations:
k8s.v1.cni.cncf.io/networks: sriov-data0
labels:
app: sriov-mixed
spec:
replicas: 1
selector:
matchLabels:
app: sriov-mixed
template:
metadata:
labels:
app: sriov-mixed
restart-on-reboot: 'true'
spec:
containers:
- name: sriov-mixed
image: registry.local:9001/centos/tools:latest
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
volumeMounts:
- name: dummy
mountPath: /etc/dummy
resources:
requests:
cpu: 1
memory: "1Gi"
intel.com/pci_sriov_net_group0_data0: '1'
limits:
cpu: 1
memory: "1Gi"
intel.com/pci_sriov_net_group0_data0: '1'
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: dummy
hostPath:
path: "/home/sysadmin"

View File

@@ -0,0 +1,29 @@
# Deployment "small": one resource-consumer pod with equal requests and limits
# (100m CPU / 200Mi memory — Guaranteed QoS class), serving on containerPort 8080.
# Pulls directly from gcr.io (no local-registry prefix, no imagePullSecrets).
# NOTE(review): YAML nesting indentation was lost in this diff rendering — restore
# the original indentation before applying; as-is this is not valid YAML.
apiVersion: apps/v1
kind: Deployment
metadata:
name: small
labels:
app: small
spec:
replicas: 1
selector:
matchLabels:
app: small
template:
metadata:
labels:
app: small
spec:
containers:
- image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
name: small
ports:
- containerPort: 8080
resources:
requests:
cpu: "100m"
memory: "200Mi"
limits:
cpu: "100m"
memory: "200Mi"
restartPolicy: Always

View File

@@ -0,0 +1,44 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sriov-small
annotations:
k8s.v1.cni.cncf.io/networks: sriov-data0
labels:
app: sriov-small
spec:
replicas: 1
selector:
matchLabels:
app: sriov-small
template:
metadata:
labels:
app: sriov-small
restart-on-reboot: 'true'
spec:
containers:
- name: sriov-small
image: registry.local:9001/centos/tools:latest
imagePullPolicy: IfNotPresent
command: [ "/bin/bash", "-c", "--" ]
args: [ "while true; do sleep 300000; done;" ]
volumeMounts:
- name: dummy
mountPath: /etc/dummy
resources:
requests:
cpu: 1
memory: "1Gi"
intel.com/pci_sriov_net_group0_data0: '1'
limits:
cpu: 1
memory: "1Gi"
intel.com/pci_sriov_net_group0_data0: '1'
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: dummy
hostPath:
path: "/home/sysadmin"

View File

@@ -0,0 +1,23 @@
# Deployment "besteffort": five replicas of the resource-consumer image with an
# empty resources block (BestEffort QoS), serving on containerPort 8080.
# Pulls directly from gcr.io (no local-registry prefix, no imagePullSecrets).
# NOTE(review): YAML nesting indentation was lost in this diff rendering — restore
# the original indentation before applying; as-is this is not valid YAML.
apiVersion: apps/v1
kind: Deployment
metadata:
name: besteffort
labels:
app: besteffort
spec:
replicas: 5
selector:
matchLabels:
app: besteffort
template:
metadata:
labels:
app: besteffort
spec:
containers:
- image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
name: besteffort
ports:
- containerPort: 8080
resources: {}
restartPolicy: Always

View File

@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: burstable-random-stressor
labels:
app: burstable-random-stressor
spec:
replicas: 5
selector:
matchLabels:
app: burstable-random-stressor
template:
metadata:
labels:
app: burstable-random-stressor
spec:
containers:
- name: burstable-random-stressor
image: alexeiled/stress-ng
command: ["/stress-ng"]
args: ["--random", "5", "--metrics-brief", "--timeout", "5m", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "25m"
memory: "100Mi"
limits:
cpu: "50m"
memory: "200Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: limits.memory
restartPolicy: Always
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,72 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guaranteed-cpu-stressor
labels:
app: guaranteed-cpu-stressor
spec:
replicas: 5
selector:
matchLabels:
app: guaranteed-cpu-stressor
template:
metadata:
labels:
app: guaranteed-cpu-stressor
spec:
containers:
- name: guaranteed-cpu-stressor
image: alexeiled/stress-ng
command: ["/stress-ng"]
args: ["--cpu", "1", "--metrics-brief", "--timeout", "5m", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "500m"
memory: "256Mi"
limits:
cpu: "500m"
memory: "256Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: limits.memory
restartPolicy: Always
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,31 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: huge-1g
labels:
app: huge-1g
spec:
replicas: 5
selector:
matchLabels:
app: huge-1g
template:
metadata:
labels:
app: huge-1g
spec:
containers:
- image: gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
name: huge-1g
ports:
- containerPort: 8080
resources:
requests:
cpu: "500m"
memory: "1Gi"
hugepages-1Gi: "1Gi"
limits:
cpu: "500m"
memory: "1Gi"
hugepages-1Gi: "1Gi"
restartPolicy: Always

View File

@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sriov-io-stressor
annotations:
k8s.v1.cni.cncf.io/networks: sriov-data0
labels:
app: sriov-io-stressor
spec:
replicas: 5
selector:
matchLabels:
app: sriov-io-stressor
template:
metadata:
labels:
app: sriov-io-stressor
spec:
containers:
- name: sriov-io-stressor
image: alexeiled/stress-ng
command: ["/stress-ng"]
args: ["--sequential", "1", "--class", "io", "--metrics-brief", "--timeout", "5m", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "100m"
memory: "256Mi"
intel.com/pci_sriov_net_group0_data0: '1'
limits:
cpu: "100m"
memory: "256Mi"
intel.com/pci_sriov_net_group0_data0: '1'
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: limits.memory
restartPolicy: Always
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,26 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: besteffort
labels:
app: besteffort
spec:
replicas: 5
selector:
matchLabels:
app: besteffort
template:
metadata:
labels:
app: besteffort
spec:
containers:
- image: registry.local:9001/gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
imagePullPolicy: IfNotPresent
name: besteffort
ports:
- containerPort: 8080
resources: {}
restartPolicy: Always
imagePullSecrets:
- name: regcred

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: burstable-random-stressor
labels:
app: burstable-random-stressor
spec:
replicas: 5
selector:
matchLabels:
app: burstable-random-stressor
template:
metadata:
labels:
app: burstable-random-stressor
spec:
containers:
- name: burstable-random-stressor
image: registry.local:9001/alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--random", "5", "--metrics-brief", "--timeout", "1h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "25m"
memory: "100Mi"
limits:
cpu: "50m"
memory: "200Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: burstable-random-stressor
resource: limits.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,75 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: guaranteed-cpu-stressor
labels:
app: guaranteed-cpu-stressor
spec:
replicas: 5
selector:
matchLabels:
app: guaranteed-cpu-stressor
template:
metadata:
labels:
app: guaranteed-cpu-stressor
spec:
containers:
- name: guaranteed-cpu-stressor
image: registry.local:9001/alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--cpu", "1", "--metrics-brief", "--timeout", "1h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "500m"
memory: "256Mi"
limits:
cpu: "500m"
memory: "256Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: guaranteed-cpu-stressor
resource: limits.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: huge-1g
labels:
app: huge-1g
spec:
replicas: 5
selector:
matchLabels:
app: huge-1g
template:
metadata:
labels:
app: huge-1g
spec:
containers:
- image: registry.local:9001/gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4
imagePullPolicy: IfNotPresent
name: huge-1g
ports:
- containerPort: 8080
resources:
requests:
cpu: "500m"
memory: "1Gi"
hugepages-1Gi: "1Gi"
limits:
cpu: "500m"
memory: "1Gi"
hugepages-1Gi: "1Gi"
restartPolicy: Always
imagePullSecrets:
- name: regcred

View File

@@ -0,0 +1,79 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: sriov-io-stressor
annotations:
k8s.v1.cni.cncf.io/networks: sriov-data0
labels:
app: sriov-io-stressor
spec:
replicas: 5
selector:
matchLabels:
app: sriov-io-stressor
template:
metadata:
labels:
app: sriov-io-stressor
spec:
containers:
- name: sriov-io-stressor
image: registry.local:9001/alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--sequential", "1", "--class", "io", "--metrics-brief", "--timeout", "1h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "100m"
memory: "256Mi"
intel.com/pci_sriov_net_group0_data0: '1'
limits:
cpu: "100m"
memory: "256Mi"
intel.com/pci_sriov_net_group0_data0: '1'
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: sriov-io-stressor
resource: limits.memory
restartPolicy: Always
imagePullSecrets:
- name: regcred
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,19 @@
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: sriov-data0
annotations:
k8s.v1.cni.cncf.io/resourceName: intel.com/pci_sriov_net_group0_data0
spec:
config: '{
"type": "sriov",
"name": "sriov-data0",
"ipam": {
"type": "host-local",
"subnet": "10.56.217.0/24",
"routes": [{
"dst": "0.0.0.0/0"
}],
"gateway": "10.56.217.1"
}
}'

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
name: cpu-stress-app
labels:
app: cpu-stress-app
spec:
restartPolicy: OnFailure
containers:
- name: cpu-stress-app
image: alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--cpu", "$(REQ_CPU)", "--metrics-brief", "--timeout", "48h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: 2
memory: "1Gi"
limits:
cpu: 2
memory: "1Gi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: cpu-stress-app
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: cpu-stress-app
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: cpu-stress-app
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: cpu-stress-app
resource: limits.memory
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
name: disk-stress-app
labels:
app: disk-stress-app
spec:
restartPolicy: OnFailure
containers:
- name: disk-stress-app
image: alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--disk", "2", "--metrics-brief", "--timeout", "10m", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "500m"
memory: "256Mi"
limits:
cpu: "500m"
memory: "256Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: disk-stress-app
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: disk-stress-app
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: disk-stress-app
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: disk-stress-app
resource: limits.memory
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
name: io-stress-app
labels:
app: io-stress-app
spec:
restartPolicy: OnFailure
containers:
- name: io-stress-app
image: alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--sequential", "8", "--class", "io", "--metrics-brief", "--timeout", "48h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: "200m"
memory: "256Mi"
limits:
cpu: "200m"
memory: "256Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: io-stress-app
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: io-stress-app
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: io-stress-app
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: io-stress-app
resource: limits.memory
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
name: memory-stress-app
labels:
app: memory-stress-app
spec:
restartPolicy: OnFailure
containers:
- name: memory-stress-app
image: alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--vm", "2", "--vm-bytes", "256M", "--metrics-brief", "--timeout", "48h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: 1
memory: "5Gi"
limits:
cpu: 1
memory: "5Gi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: memory-stress-app
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: memory-stress-app
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: memory-stress-app
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: memory-stress-app
resource: limits.memory
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,63 @@
apiVersion: v1
kind: Pod
metadata:
name: random-stress-app
labels:
app: random-stress-app
spec:
restartPolicy: OnFailure
containers:
- name: random-stress-app
image: alexeiled/stress-ng
imagePullPolicy: IfNotPresent
command: ["/stress-ng"]
args: ["--random", "10", "--metrics-brief", "--timeout", "48h", "-v", "--log-file", "/output/$(POD_NAME)-$(NODE_NAME).log", "--yaml", "/output/$(POD_NAME)-$(NODE_NAME).yaml" ]
volumeMounts:
- name: scratch-volume
mountPath: /scratch
- name: output-volume
mountPath: /output
resources:
requests:
cpu: 1
memory: "512Mi"
limits:
cpu: 1
memory: "512Mi"
env:
- name: NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: REQ_CPU
valueFrom:
resourceFieldRef:
containerName: random-stress-app
resource: requests.cpu
- name: REQ_MEM
valueFrom:
resourceFieldRef:
containerName: random-stress-app
resource: requests.memory
- name: LIM_CPU
valueFrom:
resourceFieldRef:
containerName: random-stress-app
resource: limits.cpu
- name: LIM_MEM
valueFrom:
resourceFieldRef:
containerName: random-stress-app
resource: limits.memory
volumes:
- name: scratch-volume
emptyDir:
medium: Memory
sizeLimit: 1Gi
- name: output-volume
hostPath:
path: "/home/sysadmin/stress"

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: besteffort
spec:
ports:
- port: 8080
type: LoadBalancer

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: burstable
spec:
ports:
- port: 8080
type: LoadBalancer

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: guaranteed
spec:
ports:
- port: 8080
type: LoadBalancer

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: huge-1g
spec:
ports:
- port: 8080
type: LoadBalancer

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: small
spec:
ports:
- port: 8080
type: LoadBalancer

View File

@@ -0,0 +1,8 @@
apiVersion: v1
kind: Service
metadata:
name: sriov
spec:
ports:
- port: 8080
type: LoadBalancer

Binary file not shown.

BIN
resources/images/stress-ng.tar Executable file

Binary file not shown.

View File

@@ -0,0 +1,260 @@
from time import time
from pytest import mark
from config.configuration_manager import ConfigurationManager
from framework.logging.automation_logger import get_logger
from framework.resources.resource_finder import get_stx_resource_path
from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
from keywords.docker.images.docker_load_image_keywords import DockerLoadImageKeywords
from keywords.docker.login.docker_login_keywords import DockerLoginKeywords
from keywords.files.file_keywords import FileKeywords
from keywords.k8s.namespace.kubectl_get_namespaces_keywords import KubectlGetNamespacesKeywords
from keywords.k8s.namespace.kubectl_create_namespace_keywords import KubectlCreateNamespacesKeywords
from keywords.k8s.secret.kubectl_create_secret_keywords import KubectlCreateSecretsKeywords
from keywords.k8s.pods.kubectl_apply_pods_keywords import KubectlApplyPodsKeywords
from keywords.k8s.deployments.kubectl_delete_deployments_keywords import KubectlDeleteDeploymentsKeywords
from keywords.k8s.service.kubectl_delete_service_keywords import KubectlDeleteServiceKeywords
from keywords.k8s.service.kubectl_get_service_keywords import KubectlGetServiceKeywords
from keywords.k8s.namespace.kubectl_delete_namespace_keywords import KubectlDeleteNamespaceKeywords
from keywords.k8s.deployments.kubectl_scale_deployements_keywords import KubectlScaleDeploymentsKeywords
from keywords.k8s.deployments.kubectl_get_deployments_keywords import KubectlGetDeploymentsKeywords
import os
from keywords.k8s.pods.kubectl_get_pods_keywords import KubectlGetPodsKeywords
from framework.validation.validation import validate_equals
IMAGES = [
"kubernetes-e2e-test-images.tar",
"stress-ng.tar",
"resource-consumer.tar"
]
SCALE_FACTOR = 30
SERVICES_PATH = "resources/cloud_platform/system_test/pod_scaling/services"
DEPLOYMENTS_PATH = "resources/cloud_platform/system_test/pod_scaling/deployments"
@mark.p0
def test_deploy_small_benchmark(request):
    """
    Deploys pods for the small benchmark type.

    Scales the deployments up and down and measures the time taken for each operation.

    Args:
        request: pytest request object, used to register the teardown finalizer
    """
    deploy_benchmark_pods(request, 'small')
@mark.p0
def test_deploy_benchmark_pods_mixed(request):
    """
    Deploys pods for the mixed benchmark type.

    Scales the deployments up and down and measures the time taken for each operation.

    Args:
        request: pytest request object, used to register the teardown finalizer
    """
    deploy_benchmark_pods(request, 'mixed')
@mark.p0
def test_deploy_benchmark_pods_large(request):
    """
    Deploys pods for the large benchmark type.

    Scales the deployments up and down and measures the time taken for each operation.

    Args:
        request: pytest request object, used to register the teardown finalizer
    """
    deploy_benchmark_pods(request, 'large')
def deploy_benchmark_pods(request, benchmark):
    """
    Deploys pods for the selected benchmark type.

    Uploads the benchmark YAMLs, (re)creates the benchmark namespace, applies
    services and deployments, then scales the deployments up and down,
    measuring the time taken for each operation.

    Args:
        request: pytest request object, used to register the teardown finalizer
        benchmark: The type of benchmark to run (mixed, large, small, stress)
    """
    ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
    file_keywords = FileKeywords(ssh_connection)
    namespace = f"{benchmark}-benchmark"
    local_services_dir = get_stx_resource_path(SERVICES_PATH)
    remote_services_dir = "/tmp/system_test/services"
    local_deployments_dir = get_stx_resource_path(f"{DEPLOYMENTS_PATH}/{benchmark}")
    remote_deployments_dir = f"/tmp/system_test/deployments/{benchmark}"

    setup_docker_registry(ssh_connection, file_keywords, namespace, IMAGES)
    setup_upload_files(local_services_dir, remote_services_dir, local_deployments_dir, remote_deployments_dir)

    get_logger().log_info(f"Creating namespace '{namespace}'...")
    ns_creator = KubectlCreateNamespacesKeywords(ssh_connection)
    raw_namespace_obj = KubectlGetNamespacesKeywords(ssh_connection).get_namespaces()
    namespace_objs = raw_namespace_obj.get_namespaces()
    existing_namespaces = [ns.get_name() for ns in namespace_objs]
    # Recreate the namespace from scratch so leftovers from a previous run
    # cannot skew the timing measurements.
    if namespace in existing_namespaces:
        ns_destroyer = KubectlDeleteNamespaceKeywords(ssh_connection)
        ns_destroyer.delete_namespace(namespace)
    ns_creator.create_namespaces(namespace)

    get_logger().log_info("Applying service YAMLs...")
    service_files = file_keywords.get_files_in_dir(remote_services_dir)
    pod_applier = KubectlApplyPodsKeywords(ssh_connection)
    for svc_yaml in service_files:
        pod_applier.apply_from_yaml(f"{remote_services_dir}/{svc_yaml}", namespace=namespace)

    get_logger().log_info("Applying deployment YAMLs and measuring time...")
    pod_getter = KubectlGetPodsKeywords(ssh_connection)
    deployment_files = file_keywords.get_files_in_dir(remote_deployments_dir)
    start_deploy = time()
    for dep_yaml in deployment_files:
        pod_applier.apply_from_yaml(f"{remote_deployments_dir}/{dep_yaml}", namespace=namespace)
    validate_equals(
        pod_getter.wait_for_all_pods_status(expected_statuses=["Running", "Completed"]),
        True,
        'Pods reached expected state')
    deploy_time = time() - start_deploy
    get_logger().log_info(f"Time to deploy pods for the first time: {deploy_time:.2f} seconds")

    get_logger().log_info("Scaling up all deployments and calculating time...")
    scale_up_time = scale_deployments(ssh_connection, SCALE_FACTOR, namespace)
    get_logger().log_info(f"Time to scale up pods: {scale_up_time:.2f} seconds")

    get_logger().log_info("Scaling down all deployments and calculating time...")
    scale_down_time = scale_deployments(ssh_connection, 0, namespace)
    get_logger().log_info(f"Time to scale down pods: {scale_down_time:.2f} seconds")

    def teardown():
        # Delete every deployment and service created in the benchmark
        # namespace before dropping the namespace itself.
        deployments_output = KubectlGetDeploymentsKeywords(ssh_connection).get_deployments(namespace=namespace)
        deployments_objs = deployments_output.get_deployments()
        for deployment in [dep.get_name() for dep in deployments_objs]:
            get_logger().log_info(f"Deleting deployment {deployment} in namespace {namespace}...")
            KubectlDeleteDeploymentsKeywords(ssh_connection).delete_deployment(deployment, namespace=namespace)
        services_output = KubectlGetServiceKeywords(ssh_connection).get_services(namespace)
        services_obj = services_output.get_services()
        for service in [svc.get_name() for svc in services_obj]:
            get_logger().log_info(f"Deleting service {service} in namespace {namespace}...")
            KubectlDeleteServiceKeywords(ssh_connection).delete_service(service, namespace=namespace)
        KubectlDeleteNamespaceKeywords(ssh_connection).delete_namespace(namespace)

    request.addfinalizer(teardown)
def setup_upload_files(
    local_services_dir: str,
    remote_services_dir: str,
    local_deployments_dir: str,
    remote_deployments_dir: str
) -> None:
    """
    Uploads necessary files to the controller node for the pod scaling test.

    Args:
        local_services_dir (str): Path to the local directory containing service YAML files.
        remote_services_dir (str): Path to the remote directory where service YAML files will be uploaded.
        local_deployments_dir (str): Path to the local directory containing deployment YAML files.
        remote_deployments_dir (str): Path to the remote directory where deployment YAML files will be uploaded.
    """
    ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
    file_keywords = FileKeywords(ssh_connection)

    get_logger().log_info("Uploading service yaml files ...")
    file_keywords.create_directory(remote_services_dir)
    for filename in os.listdir(local_services_dir):
        local_file = os.path.join(local_services_dir, filename)
        # Preserve the original file name on the remote side; the previous
        # code uploaded every file to the same literal "(unknown)" path.
        remote_file = f"{remote_services_dir}/{filename}"
        if os.path.isfile(local_file):
            file_keywords.upload_file(local_file, remote_file, overwrite=True)

    get_logger().log_info("Uploading deployment yaml files ...")
    file_keywords.create_directory(remote_deployments_dir)
    for filename in os.listdir(local_deployments_dir):
        local_file = os.path.join(local_deployments_dir, filename)
        remote_file = f"{remote_deployments_dir}/{filename}"
        if os.path.isfile(local_file):
            file_keywords.upload_file(local_file, remote_file, overwrite=True)

    get_logger().log_info("Uploading netdef yaml files ...")
    local_netdef_file = get_stx_resource_path("resources/cloud_platform/system_test/pod_scaling/netdef-data0.yaml")
    file_keywords.upload_file(local_netdef_file, f"{remote_deployments_dir}/netdef-data0.yaml", overwrite=True)
def setup_docker_registry(ssh_connection, file_keywords, namespace, images):
    """
    Sets up the local Docker registry by logging in and pushing necessary images.

    Loads each image tarball on the host, tags it for the local registry,
    pushes it, and finally creates the registry pull secret in the namespace.

    Args:
        ssh_connection: The SSH connection to the active controller.
        file_keywords: FileKeywords instance bound to the same connection.
        namespace: Kubernetes namespace in which to create the pull secret.
        images: List of image tarball file names under resources/images.
    """
    docker_config = ConfigurationManager.get_docker_config()
    local_registry = docker_config.get_registry("local_registry")
    registry_url = local_registry.get_registry_url()
    registry_user = local_registry.get_user_name()
    registry_pass = local_registry.get_password()

    # Step 1: Load, tag, and push images to the local registry.
    docker_login = DockerLoginKeywords(ssh_connection)
    docker_login.login(registry_user, registry_pass, registry_url)
    image_folder = get_stx_resource_path("resources/images")
    docker_loader = DockerLoadImageKeywords(ssh_connection)
    for image_file in images:
        local_path = f"{image_folder}/{image_file}"
        remote_path = f"/home/sysadmin/{image_file}"
        file_keywords.upload_file(local_path, remote_path, overwrite=False)
        image_name = docker_loader.load_docker_image_to_host(image_file)
        tagged_image = f"{registry_url}/{image_name}"
        get_logger().log_info(f"image name {image_name} and tagged name {tagged_image} ...")
        docker_loader.tag_docker_image_for_registry(
            image_name,
            image_name,
            local_registry
        )
        docker_loader.push_docker_image_to_registry(image_name, local_registry)

    # Step 2: Create the docker registry secret for the selected namespace.
    get_logger().log_info("Creating secrets ")
    secret_creator = KubectlCreateSecretsKeywords(ssh_connection)
    secret_creator.create_secret_for_registry(local_registry, "regcred", namespace=namespace)
def scale_deployments(ssh_connection, replicas, namespace):
    """
    Scales all deployments in the specified namespace according to replicas.

    The requested replica count is split evenly across all deployments found
    in the namespace; the elapsed time includes waiting for all pods to reach
    a Running/Completed state.

    Args:
        ssh_connection: The SSH connection to the target host.
        replicas: The total desired number of replicas across all deployments.
        namespace: The Kubernetes namespace containing the deployments.

    Returns:
        float: The time taken to scale the deployments, in seconds.
    """
    pod_getter = KubectlGetPodsKeywords(ssh_connection)
    pod_scaler = KubectlScaleDeploymentsKeywords(ssh_connection)
    deployments_obj = KubectlGetDeploymentsKeywords(ssh_connection).get_deployments(namespace=namespace)
    deployments = deployments_obj.get_deployments()
    # Hoist the per-deployment replica count out of the loop; guard against
    # an empty namespace to avoid dividing by zero.
    replicas_per_deployment = replicas // len(deployments) if deployments else 0
    start_scale = time()
    for deployment in deployments:
        pod_scaler.scale_deployment(deployment.get_name(),
                                    replicas=replicas_per_deployment,
                                    namespace=namespace)
    # validate_equals (not assert) so the check survives python -O, matching
    # the validation style used by deploy_benchmark_pods.
    validate_equals(
        pod_getter.wait_for_all_pods_status(expected_statuses=["Running", "Completed"]),
        True,
        'Pods reached expected state')
    scale_time = time() - start_scale
    return scale_time