Dynamic workloads: Support for multiple OCP clusters

This patch adds support for multiple Openshift clusters in
kube-burner dynamic workloads.

Change-Id: I15a629c43fe359e40a28a75493fe27483e31d1d2
Sanjay Chari 2021-09-14 13:16:52 +05:30
parent 96aff26d80
commit d4bb0a95a7
7 changed files with 39 additions and 12 deletions
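
The heart of the change is how a kubeconfig is picked per rally iteration: iterations are spread round-robin across the configured clusters, so iteration 1 targets the first kubeconfig, iteration 2 the second, and so on, wrapping around. A minimal standalone sketch of that indexing, mirroring the arithmetic added to the DynamicWorkload scenario below (paths are hypothetical):

    # Round-robin mapping of 1-based rally iterations to kubeconfig paths.
    kubeconfig_paths = [
        "/home/stack/.kube/cluster1/config",  # hypothetical paths
        "/home/stack/.kube/cluster2/config",
    ]

    def kubeconfig_for_iteration(iteration):
        return kubeconfig_paths[(iteration - 1) % len(kubeconfig_paths)]

    for i in range(1, 5):
        # 1 -> cluster1, 2 -> cluster2, 3 -> cluster1, 4 -> cluster2
        print(i, kubeconfig_for_iteration(i))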


@@ -9,6 +9,8 @@
 - name: Deploy benchmark operator and make changes to scripts
   shell: |
+    export KUBECONFIG={{item}}
     ./install_e2e_benchmarking.sh
+  loop: "{{ lookup('file', '{{ browbeat_path }}/ansible/kubeconfig_paths').splitlines() }}"
   args:
     chdir: "{{ browbeat_path }}/ansible"

ansible/kubeconfig_paths (new file)

@@ -0,0 +1 @@
+/home/stack/.kube/config
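
The file ships with the single default path, so single-cluster behaviour is unchanged. To drive several clusters, list one kubeconfig path per line (the playbook above reads the file with splitlines()). A hypothetical two-cluster version:

    /home/stack/.kube/cluster1/config
    /home/stack/.kube/cluster2/config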


@@ -589,6 +589,8 @@ workloads:
       # shift_on_stack_workload can be poddensity, clusterdensity, maxnamespaces,
       # or maxservices
       shift_on_stack_workload: poddensity
+      shift_on_stack_kubeconfig_paths:
+        - /home/stack/.kube/config
       # workloads can be 'all', a single workload(Eg. : create_delete_servers),
       # or a comma separated string(Eg. : create_delete_servers,migrate_servers).
       # Currently supported workloads : create_delete_servers, migrate_servers
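
The new browbeat-config option mirrors the kubeconfig_paths file: the default single entry changes nothing, while a multi-cluster run lists one path per cluster. A hypothetical two-cluster configuration:

      shift_on_stack_kubeconfig_paths:
        - /home/stack/.kube/cluster1/config
        - /home/stack/.kube/cluster2/config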


@@ -55,13 +55,16 @@ On the Undercloud
    internet. Some useful documentation can be found at:
    https://access.redhat.com/documentation/en/red-hat-openstack-platform/11/single/networking-guide/
 
-(Optional) Clone e2e-benchmarking repository
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+(Optional) Clone e2e-benchmarking repository and deploy benchmark-operator
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 e2e-benchmarking is a repository that is used to run workloads to stress an Openshift
 cluster, and is needed to run shift-on-stack workloads in Browbeat.
-To enable the e2e-benchmarking repository to be cloned, set install_e2e_benchmarking: true
-in ansible/install/group_vars/all.yml before running the command mentioned below.
+To enable the e2e-benchmarking repository to be cloned and benchmark-operator to be deployed,
+set install_e2e_benchmarking: true in ansible/install/group_vars/all.yml.
+After that, add the kubeconfig paths of all your Openshift clusters to the ansible/kubeconfig_paths
+file, and move the default kubeconfig file (/home/stack/.kube/config) to another location so that it
+isn't used for all Openshift clusters. Then run the command below.
 
 [stack@undercloud ansible]$ ansible-playbook -i hosts.yml install/browbeat.yml
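
Taken together, a hypothetical two-cluster setup might look like this (the cluster kubeconfig locations and the /home/stack/browbeat checkout path are assumptions):

    [stack@undercloud ~]$ mv /home/stack/.kube/config /home/stack/.kube/cluster1-config
    [stack@undercloud ~]$ cat > /home/stack/browbeat/ansible/kubeconfig_paths <<EOF
    /home/stack/.kube/cluster1-config
    /home/stack/.kube/cluster2-config
    EOF
    [stack@undercloud ~]$ cd /home/stack/browbeat/ansible
    [stack@undercloud ansible]$ ansible-playbook -i hosts.yml install/browbeat.yml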


@@ -10,6 +10,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import os
+
 from rally_openstack import consts
 from rally.task import scenario
 from rally.task import types
@@ -55,7 +57,8 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
                  num_pools, num_clients, delete_num_lbs, delete_num_members, num_create_vms, num_delete_vms,
                  provider_phys_net, iface_name, iface_mac, num_vms_provider_net,
                  shift_on_stack_job_iterations, shift_on_stack_qps, shift_on_stack_burst,
-                 shift_on_stack_workload, workloads="all", router_create_args=None, network_create_args=None,
+                 shift_on_stack_workload, shift_on_stack_kubeconfig_paths, workloads="all",
+                 router_create_args=None, network_create_args=None,
                  subnet_create_args=None, **kwargs):
         workloads_list = workloads.split(",")
@@ -83,6 +86,11 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
             self.ext_net_name = self.clients("neutron").show_network(ext_net_id)["network"][
                 "name"]
 
+        try:
+            self.browbeat_dir = DynamicWorkload.browbeat_dir
+        except AttributeError:
+            DynamicWorkload.browbeat_dir = os.getcwd()
+
         if workloads == "all" or "create_delete_servers" in workloads_list:
             self.boot_servers(smallest_image, smallest_flavor, num_create_vms,
                               subnet_create_args=subnet_create_args)
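
The try/except above caches Browbeat's working directory as a class attribute the first time any scenario instance runs, so later iterations can restore it even after run_kube_burner_workload() has chdir()ed into the e2e-benchmarking tree. A standalone sketch of the idiom (class and attribute names hypothetical):

    import os

    class Scenario:
        def remember_root_dir(self):
            # First call: cwd is still the project root; cache it on the class.
            # Later calls (possibly after os.chdir elsewhere) reuse the cache.
            try:
                self.root_dir = Scenario.root_dir
            except AttributeError:
                Scenario.root_dir = os.getcwd()
                self.root_dir = Scenario.root_dir

    s = Scenario()
    s.remember_root_dir()
    os.chdir("/tmp")
    t = Scenario()
    t.remember_root_dir()
    assert t.root_dir == s.root_dir  # still the original root, not /tmp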
@@ -141,6 +149,10 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
             self.provider_net_nova_delete(provider_phys_net)
 
         if "shift_on_stack" in workloads_list:
+            num_openshift_clusters = len(shift_on_stack_kubeconfig_paths)
             self.run_kube_burner_workload(shift_on_stack_workload,
                                           shift_on_stack_job_iterations,
-                                          shift_on_stack_qps, shift_on_stack_burst)
+                                          shift_on_stack_qps, shift_on_stack_burst,
+                                          shift_on_stack_kubeconfig_paths[
+                                              ((self.context["iteration"] - 1)
+                                               % num_openshift_clusters)])


@@ -27,6 +27,7 @@
 {% set shift_on_stack_qps = shift_on_stack_qps or 20 %}
 {% set shift_on_stack_burst = shift_on_stack_burst or 20 %}
 {% set shift_on_stack_workload = shift_on_stack_workload or 'poddensity' %}
+{% set shift_on_stack_kubeconfig_paths = shift_on_stack_kubeconfig_paths or ['/home/stack/.kube/config'] %}
 {% set router_external = router_external or True %}
 {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
 {% set sla_max_failure = sla_max_failure or 0 %}
@@ -73,6 +74,7 @@ BrowbeatPlugin.dynamic_workload:
       shift_on_stack_qps: {{shift_on_stack_qps}}
       shift_on_stack_burst: {{shift_on_stack_burst}}
       shift_on_stack_workload: '{{shift_on_stack_workload}}'
+      shift_on_stack_kubeconfig_paths: {{shift_on_stack_kubeconfig_paths}}
       provider_phys_net: '{{ provider_phys_net }}'
       iface_name: '{{ iface_name }}'
       iface_mac: '{{ iface_mac }}'


@@ -19,22 +19,23 @@ import dynamic_utils
 class ShiftStackDynamicScenario(dynamic_utils.NovaUtils,
                                 dynamic_utils.NeutronUtils,
                                 dynamic_utils.LockUtils):
-    def run_kube_burner_workload(self, workload, job_iterations, qps, burst):
+    def run_kube_burner_workload(self, workload, job_iterations, qps, burst, kubeconfig):
         """Run kube-burner workloads through e2e-benchmarking
 
         :param workload: str, kube-burner workload to run
         :param job_iterations: int, number of job iterations
         :param qps: int, queries per second
         :param burst: int, burst value to throttle
+        :param kubeconfig: str, path to kubeconfig file
         """
-        browbeat_dir = os.getcwd()
         os.chdir(
-            browbeat_dir + "/ansible/gather/e2e-benchmarking/workloads/kube-burner"
+            self.browbeat_dir + "/ansible/gather/e2e-benchmarking/workloads/kube-burner"
         )
         e2e_benchmarking_dir = os.getcwd()
         script_file_name = "run_" + workload + "_test_fromgit.sh"
         script_file_path = e2e_benchmarking_dir + "/" + script_file_name
         script_file = open(script_file_path, "r")
         updated_file_content = ""
 
         if workload == "poddensity":
@@ -47,7 +48,10 @@ class ShiftStackDynamicScenario(dynamic_utils.NovaUtils,
             job_iters_param = "SERVICE_COUNT"
 
         for line in script_file:
-            if "TEST_JOB_ITERATIONS" in line:
+            if "/usr/bin/bash" in line:
+                updated_file_content += line
+                updated_file_content += "export KUBECONFIG=${KUBECONFIG:-"+kubeconfig+"}\n"
+            elif "TEST_JOB_ITERATIONS" in line:
                 first_part_of_line = line.split("TEST")[0]
                 updated_file_content += (
                     first_part_of_line + "TEST_JOB_ITERATIONS=${" + job_iters_param +
@@ -56,7 +60,8 @@ class ShiftStackDynamicScenario(dynamic_utils.NovaUtils,
                 updated_file_content += "export QPS=" + str(qps) + "\n"
                 updated_file_content += "export BURST=" + str(burst) + "\n"
                 updated_file_content += "export CLEANUP_WHEN_FINISH=true\n"
-            else:
+            elif ("export KUBECONFIG" not in line and "export QPS" not in line and
+                  "export BURST" not in line and "export CLEANUP_WHEN_FINISH" not in line):
                 updated_file_content += line
 
         with open(script_file_path, "w") as script_file_writer:
@@ -65,4 +70,4 @@ class ShiftStackDynamicScenario(dynamic_utils.NovaUtils,
         subprocess.run("./" + script_file_name + " 2>&1 | tee -a log.txt && exit ${PIPESTATUS}",
                        shell=True, check=True, executable="/bin/bash")
-        os.chdir(browbeat_dir)
+        os.chdir(self.browbeat_dir)
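
Because the e2e-benchmarking scripts are rewritten in place, the loop above has to be idempotent: it re-inserts the KUBECONFIG export right after the shebang on every run and filters out the exports a previous run left behind. A self-contained sketch of the same transformation on an in-memory script (the tail of the TEST_JOB_ITERATIONS substitution is cut off by the hunk above, so the ${PARAM:-N} form used here is an assumption):

    def rewrite_script(text, kubeconfig, job_iters_param, job_iterations, qps, burst):
        out = ""
        for line in text.splitlines(keepends=True):
            if "/usr/bin/bash" in line:
                # Keep the shebang, then pin KUBECONFIG for this cluster.
                out += line
                out += "export KUBECONFIG=${KUBECONFIG:-" + kubeconfig + "}\n"
            elif "TEST_JOB_ITERATIONS" in line:
                prefix = line.split("TEST")[0]
                out += (prefix + "TEST_JOB_ITERATIONS=${" + job_iters_param +
                        ":-" + str(job_iterations) + "}\n")
                out += "export QPS=" + str(qps) + "\n"
                out += "export BURST=" + str(burst) + "\n"
                out += "export CLEANUP_WHEN_FINISH=true\n"
            elif ("export KUBECONFIG" not in line and "export QPS" not in line and
                  "export BURST" not in line and "export CLEANUP_WHEN_FINISH" not in line):
                out += line  # drop exports injected by a previous rewrite
        return out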