Fix references to get_local_registry

Signed-off-by: croy <Christian.Roy@windriver.com>
Change-Id: Ia03eade50efa97f6fcb61ba1039437beb417bf71
This commit is contained in:
croy
2025-12-08 09:58:33 -05:00
parent 0120d6d084
commit eb1edd1289
7 changed files with 58 additions and 75 deletions

View File

@@ -5,6 +5,7 @@ import yaml
from config.configuration_manager import ConfigurationManager
from framework.exceptions.keyword_exception import KeywordException
from framework.logging.automation_logger import get_logger
from framework.resources import resource_finder
from framework.ssh.ssh_connection import SSHConnection
from keywords.base_keyword import BaseKeyword
from keywords.docker.images.docker_images_keywords import DockerImagesKeywords
@@ -111,7 +112,7 @@ class DockerSyncImagesKeywords(BaseKeyword):
# Not a file path, try resolving as logical name from config
try:
path = self.docker_config.get_named_manifest(manifest)
return os.path.abspath(path)
return resource_finder.get_stx_resource_path(path)
except ValueError as e:
raise KeywordException(f"Failed to resolve manifest '{manifest}': {e}. " "Provide either a logical name defined in docker config JSON5 or a valid file path.") from e

View File

@@ -1,12 +1,11 @@
import os
from time import time
from typing import Tuple, List, Any
from typing import Any, List, Tuple
from config.configuration_manager import ConfigurationManager
from framework.logging.automation_logger import get_logger
from framework.resources.resource_finder import get_stx_resource_path
from framework.validation.validation import validate_equals
from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
from keywords.docker.images.docker_images_keywords import DockerImagesKeywords
from keywords.docker.images.docker_load_image_keywords import DockerLoadImageKeywords
from keywords.docker.login.docker_login_keywords import DockerLoginKeywords
@@ -34,16 +33,11 @@ class SetupStressPods:
Initialize the SetupStressPods class.
Args:
ssh_connection: SSH connection to the target system
ssh_connection (Any): SSH connection to the target system
"""
self.ssh_connection = ssh_connection
self.file_keywords = FileKeywords(ssh_connection)
self.images = [
"gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4",
"alexeiled/stress-ng",
"centos/tools:latest",
"datawiseio/fio:latest"
]
self.images = ["gcr.io/kubernetes-e2e-test-images/resource-consumer:1.4", "alexeiled/stress-ng", "centos/tools:latest", "datawiseio/fio:latest"]
self.scale_factor = 30
self.services_path = "resources/cloud_platform/system_test/pod_scaling/services"
self.deployments_path = "resources/cloud_platform/system_test/pod_scaling/deployments"
@@ -53,10 +47,10 @@ class SetupStressPods:
Set up stress pods for benchmark testing.
Args:
benchmark: The benchmark type to set up
benchmark (str): The benchmark type to set up
Returns:
Tuple of deploy time and scale up time in seconds
Tuple[float, float]: Tuple of deploy time and scale up time in seconds
"""
namespace = f"{benchmark}-benchmark"
local_services_dir = get_stx_resource_path(self.services_path)
@@ -78,22 +72,19 @@ class SetupStressPods:
self._setup_docker_registry(namespace, self.images)
get_logger().log_info(f"Apply services YAMLs....")
get_logger().log_info("Apply services YAMLs....")
service_files = self.file_keywords.get_files_in_dir(remote_services_dir)
pod_applier = KubectlApplyPodsKeywords(self.ssh_connection)
for svc_yaml in service_files:
pod_applier.apply_from_yaml(f"{remote_services_dir}/{svc_yaml}", namespace=namespace)
get_logger().log_info(f"Apply deployment YAMLs and calculate time....")
get_logger().log_info("Apply deployment YAMLs and calculate time....")
pod_getter = KubectlGetPodsKeywords(self.ssh_connection)
deployment_files = self.file_keywords.get_files_in_dir(remote_deployments_dir)
start_deploy = time()
for dep_yaml in deployment_files:
pod_applier.apply_from_yaml(f"{remote_deployments_dir}/{dep_yaml}", namespace=namespace)
validate_equals(
pod_getter.wait_for_all_pods_status(expected_statuses=["Running", "Completed"]),
True,
'Logs reached expected state')
validate_equals(pod_getter.wait_for_all_pods_status(expected_statuses=["Running", "Completed"]), True, "Logs reached expected state")
deploy_time = time() - start_deploy
get_logger().log_info(f"Time to deploy pods for the first time: {deploy_time:.2f} seconds")
@@ -102,19 +93,18 @@ class SetupStressPods:
get_logger().log_info(f"Time to scale up pods: {scale_up_time:.2f} seconds")
return deploy_time, scale_up_time
def _setup_upload_files(self, local_services_dir: str, remote_services_dir: str,
local_deployments_dir: str, remote_deployments_dir: str) -> None:
def _setup_upload_files(self, local_services_dir: str, remote_services_dir: str, local_deployments_dir: str, remote_deployments_dir: str) -> None:
"""
Upload necessary files to the controller node for the pod scaling test.
Args:
local_services_dir: Path to the local directory containing service YAML files
remote_services_dir: Path to the remote directory where service YAML files will be uploaded
local_deployments_dir: Path to the local directory containing deployment YAML files
remote_deployments_dir: Path to the remote directory where deployment YAML files will be uploaded
local_services_dir (str): Path to the local directory containing service YAML files
remote_services_dir (str): Path to the remote directory where service YAML files will be uploaded
local_deployments_dir (str): Path to the local directory containing deployment YAML files
remote_deployments_dir (str): Path to the remote directory where deployment YAML files will be uploaded
"""
get_logger().log_info(f"Uploading service yaml files ...")
get_logger().log_info("Uploading service yaml files ...")
self.file_keywords.create_directory(remote_services_dir)
for filename in os.listdir(local_services_dir):
local_file = os.path.join(local_services_dir, filename)
@@ -122,7 +112,7 @@ class SetupStressPods:
if os.path.isfile(local_file):
self.file_keywords.upload_file(local_file, remote_file, overwrite=True)
get_logger().log_info(f"Uploading deployment yaml files ...")
get_logger().log_info("Uploading deployment yaml files ...")
self.file_keywords.create_directory(remote_deployments_dir)
for filename in os.listdir(local_deployments_dir):
local_file = os.path.join(local_deployments_dir, filename)
@@ -130,7 +120,7 @@ class SetupStressPods:
if os.path.isfile(local_file):
self.file_keywords.upload_file(local_file, remote_file, overwrite=True)
get_logger().log_info(f"Uploading netdef yaml files ...")
get_logger().log_info("Uploading netdef yaml files ...")
local_netdef_file = get_stx_resource_path("resources/cloud_platform/system_test/pod_scaling/netdef-data0.yaml")
self.file_keywords.upload_file(local_netdef_file, f"{remote_deployments_dir}/netdef-data0.yaml", overwrite=True)
@@ -139,11 +129,11 @@ class SetupStressPods:
Set up the local Docker registry by pulling, tagging, and pushing necessary images.
Args:
namespace: Kubernetes namespace
images: List of Docker images to process
namespace (str): Kubernetes namespace
images (List[str]): List of Docker images to process
"""
docker_config = ConfigurationManager.get_docker_config()
local_registry = docker_config.get_registry("local_registry")
local_registry = docker_config.get_local_registry()
registry_url = local_registry.get_registry_url()
registry_user = local_registry.get_user_name()
registry_pass = local_registry.get_password()
@@ -172,11 +162,11 @@ class SetupStressPods:
Scale all deployments in the specified namespace according to replicas.
Args:
replicas: The desired number of replicas for each deployment
namespace: The Kubernetes namespace containing the deployments
replicas (int): The desired number of replicas for each deployment
namespace (str): The Kubernetes namespace containing the deployments
Returns:
The time taken to scale the deployments
float: The time taken to scale the deployments
"""
pod_getter = KubectlGetPodsKeywords(self.ssh_connection)
pod_scaler = KubectlScaleDeploymentsKeywords(self.ssh_connection)
@@ -184,10 +174,8 @@ class SetupStressPods:
deployments = deployments_obj.get_deployments()
start_scale = time()
for deployment in deployments:
pod_scaler.scale_deployment(deployment.get_name(),
replicas=int(replicas/len(deployments)),
namespace=namespace)
pod_scaler.scale_deployment(deployment.get_name(), replicas=int(replicas / len(deployments)), namespace=namespace)
assert pod_getter.wait_for_all_pods_status(expected_statuses=["Running", "Completed"])
scale_time = time() - start_scale
return scale_time
return scale_time

View File

@@ -1,7 +1,5 @@
from pytest import mark
from keywords.docker.images.docker_images_keywords import DockerImagesKeywords
from keywords.docker.images.docker_load_image_keywords import DockerLoadImageKeywords
from config.configuration_manager import ConfigurationManager
from framework.logging.automation_logger import get_logger
from framework.validation.validation import validate_greater_than
@@ -9,8 +7,11 @@ from keywords.cloud_platform.dcmanager.dcmanager_subcloud_backup_keywords import
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_list_keywords import DcManagerSubcloudListKeywords
from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
from keywords.cloud_platform.version_info.cloud_platform_version_manager import CloudPlatformVersionManagerClass
from keywords.docker.images.docker_images_keywords import DockerImagesKeywords
from keywords.docker.images.docker_load_image_keywords import DockerLoadImageKeywords
from keywords.files.file_keywords import FileKeywords
def teardown_local(subcloud_name: str, local_path: str):
"""Teardown function for local backup.
@@ -40,7 +41,7 @@ def test_verify_backup_with_custom_docker_image(request):
"""
docker_config = ConfigurationManager.get_docker_config()
docker_img = "hello-world:latest"
local_registry = docker_config.get_registry("local_registry")
local_registry = docker_config.get_local_registry()
local_default_backup_path = "/opt/platform-backup/backups"
central_ssh = LabConnectionKeywords().get_active_controller_ssh()
subcloud_list = DcManagerSubcloudListKeywords(central_ssh)
@@ -50,7 +51,6 @@ def test_verify_backup_with_custom_docker_image(request):
subcloud_password = lab_config.get_admin_credentials().get_password()
release = CloudPlatformVersionManagerClass().get_sw_version()
# Pulls an image from central cloud that is not on subcloud registry
DockerImagesKeywords(subcloud_ssh).pull_image(docker_img)
DockerLoadImageKeywords(subcloud_ssh).tag_docker_image_for_registry(docker_img, docker_img, local_registry)
@@ -77,4 +77,4 @@ def test_verify_backup_with_custom_docker_image(request):
img_tarball = [file for file in files_in_bckp_dir if "image_registry" in file][0]
matches = FileKeywords(subcloud_ssh).find_in_tgz(f"{local_default_backup_path}/{release}/{img_tarball}", "repositories/hello-world")
validate_greater_than(matches, 0,f"Validate that were found mathces for hello-world in {img_tarball} tarball.")
validate_greater_than(matches, 0, f"Validate that matches were found for hello-world in {img_tarball} tarball.")

View File

@@ -1,24 +1,24 @@
from pytest import mark, fail
from typing import List
from framework.validation.validation import validate_equals, validate_list_contains
from pytest import fail, mark
from config.configuration_manager import ConfigurationManager
from framework.logging.automation_logger import get_logger
from framework.validation.validation import validate_equals
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_backup_keywords import DcManagerSubcloudBackupKeywords
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_group_keywords import DcmanagerSubcloudGroupKeywords
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_list_keywords import DcManagerSubcloudListKeywords
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_manager_keywords import DcManagerSubcloudManagerKeywords
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_group_keywords import DcmanagerSubcloudGroupKeywords
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_show_keywords import DcManagerSubcloudShowKeywords
from keywords.cloud_platform.dcmanager.objects.dcmanager_subcloud_list_object_filter import DcManagerSubcloudListObjectFilter
from keywords.cloud_platform.dcmanager.dcmanager_subcloud_update_keywords import DcManagerSubcloudUpdateKeywords
from keywords.cloud_platform.dcmanager.objects.dcmanager_subcloud_list_object_filter import DcManagerSubcloudListObjectFilter
from keywords.cloud_platform.health.health_keywords import HealthKeywords
from keywords.cloud_platform.ssh.lab_connection_keywords import LabConnectionKeywords
from keywords.cloud_platform.system.host.system_host_list_keywords import SystemHostListKeywords
from keywords.cloud_platform.system.host.system_host_swact_keywords import SystemHostSwactKeywords
from keywords.cloud_platform.version_info.cloud_platform_version_manager import CloudPlatformVersionManagerClass
from keywords.files.file_keywords import FileKeywords
from keywords.docker.images.docker_images_keywords import DockerImagesKeywords
from keywords.docker.images.docker_load_image_keywords import DockerLoadImageKeywords
from keywords.files.file_keywords import FileKeywords
def create_subcloud_group(subcloud_list: List[str]) -> None:
@@ -431,7 +431,7 @@ def test_restore_remote_with_backup_values(request):
def teardown():
get_logger().log_info(f"Managing subcloud {subcloud_name}")
DcManagerSubcloudManagerKeywords(central_ssh).get_dcmanager_subcloud_manage(subcloud_name,10)
DcManagerSubcloudManagerKeywords(central_ssh).get_dcmanager_subcloud_manage(subcloud_name, 10)
get_logger().log_info("Removing test files during teardown")
FileKeywords(subcloud_ssh).delete_folder_with_sudo(local_path)
@@ -1032,8 +1032,7 @@ def test_restore_group_central_backup_active_load(request):
# Retrieves the subclouds. Considers only subclouds that are online, managed, and synced.
dcmanager_subcloud_list_input = DcManagerSubcloudListObjectFilter.get_healthy_subcloud_filter()
dcmanager_subcloud_list_keywords = DcManagerSubcloudListKeywords(central_ssh)
dcmanager_subcloud_list_objects_filtered = dcmanager_subcloud_list_keywords.get_dcmanager_subcloud_list().get_dcmanager_subcloud_list_objects_filtered(
dcmanager_subcloud_list_input)
dcmanager_subcloud_list_objects_filtered = dcmanager_subcloud_list_keywords.get_dcmanager_subcloud_list().get_dcmanager_subcloud_list_objects_filtered(dcmanager_subcloud_list_input)
subcloud_list = [subcloud.get_name() for subcloud in dcmanager_subcloud_list_objects_filtered]
if len(subcloud_list) < 2:
@@ -1048,8 +1047,7 @@ def test_restore_group_central_backup_active_load(request):
obj_health.validate_healty_cluster() # Checks alarms, pods, app health
# Gets the subcloud sysadmin password needed for backup creation.
subcloud_password = ConfigurationManager.get_lab_config().get_subcloud(
subcloud_list[0]).get_admin_credentials().get_password()
subcloud_password = ConfigurationManager.get_lab_config().get_subcloud(subcloud_list[0]).get_admin_credentials().get_password()
# Create a subcloud group and add 2 subclouds
get_logger().log_test_case_step(f"Creating subcloud group {group_name}.")
@@ -1078,13 +1076,11 @@ def test_restore_group_central_backup_active_load(request):
# Create a subcloud backup
get_logger().log_test_case_step(f"Create backup on Central Cloud for subcloud group: {group_name}")
dc_manager_backup.create_subcloud_backup(subcloud_password, central_ssh, group=group_name, release=str(release),
subcloud_list=subcloud_list)
dc_manager_backup.create_subcloud_backup(subcloud_password, central_ssh, group=group_name, release=str(release), subcloud_list=subcloud_list)
for subcloud_name in subcloud_list:
get_logger().log_test_case_step("Checking if backup was created on Central")
DcManagerSubcloudBackupKeywords(central_ssh).wait_for_backup_status_complete(subcloud_name,
expected_status="complete-central")
DcManagerSubcloudBackupKeywords(central_ssh).wait_for_backup_status_complete(subcloud_name, expected_status="complete-central")
DcManagerSubcloudManagerKeywords(central_ssh).get_dcmanager_subcloud_unmanage(subcloud_name, 10)
dc_manager_backup.restore_subcloud_backup(subcloud_password, central_ssh, group=group_name, with_install=True, subcloud_list=subcloud_list)
@@ -1092,6 +1088,7 @@ def test_restore_group_central_backup_active_load(request):
for subcloud_name in subcloud_list:
DcManagerSubcloudListKeywords(central_ssh).validate_subcloud_availability_status(subcloud_name)
@mark.p2
@mark.lab_has_min_2_subclouds
def test_restore_group_local_backup_active_load(request):
@@ -1117,8 +1114,7 @@ def test_restore_group_local_backup_active_load(request):
# Retrieves the subclouds. Considers only subclouds that are online, managed, and synced.
dcmanager_subcloud_list_input = DcManagerSubcloudListObjectFilter.get_healthy_subcloud_filter()
dcmanager_subcloud_list_keywords = DcManagerSubcloudListKeywords(central_ssh)
dcmanager_subcloud_list_objects_filtered = dcmanager_subcloud_list_keywords.get_dcmanager_subcloud_list().get_dcmanager_subcloud_list_objects_filtered(
dcmanager_subcloud_list_input)
dcmanager_subcloud_list_objects_filtered = dcmanager_subcloud_list_keywords.get_dcmanager_subcloud_list().get_dcmanager_subcloud_list_objects_filtered(dcmanager_subcloud_list_input)
subcloud_list = [subcloud.get_name() for subcloud in dcmanager_subcloud_list_objects_filtered]
if len(subcloud_list) < 2:
@@ -1133,8 +1129,7 @@ def test_restore_group_local_backup_active_load(request):
obj_health.validate_healty_cluster() # Checks alarms, pods, app health
# Gets the subcloud sysadmin password needed for backup creation.
subcloud_password = ConfigurationManager.get_lab_config().get_subcloud(
subcloud_list[0]).get_admin_credentials().get_password()
subcloud_password = ConfigurationManager.get_lab_config().get_subcloud(subcloud_list[0]).get_admin_credentials().get_password()
# Create a subcloud group and add 2 subclouds
get_logger().log_test_case_step(f"Creating subcloud group {group_name}.")
@@ -1163,14 +1158,12 @@ def test_restore_group_local_backup_active_load(request):
# Create a subcloud backup
get_logger().log_test_case_step(f"Create backup on local for subcloud group: {group_name}")
dc_manager_backup.create_subcloud_backup(subcloud_password, central_ssh, group=group_name, release=str(release),
subcloud_list=subcloud_list, local_only=True)
dc_manager_backup.create_subcloud_backup(subcloud_password, central_ssh, group=group_name, release=str(release), subcloud_list=subcloud_list, local_only=True)
for subcloud_name in subcloud_list:
subcloud_ssh = LabConnectionKeywords().get_subcloud_ssh(subcloud_name)
get_logger().log_test_case_step(f"Checking if backup was created in {subcloud_name}")
DcManagerSubcloudBackupKeywords(central_ssh).wait_for_backup_status_complete(subcloud_name,
expected_status="complete-local")
DcManagerSubcloudBackupKeywords(central_ssh).wait_for_backup_status_complete(subcloud_name, expected_status="complete-local")
DcManagerSubcloudManagerKeywords(central_ssh).get_dcmanager_subcloud_unmanage(subcloud_name, 10)
dc_manager_backup.restore_subcloud_backup(subcloud_password, central_ssh, group=group_name, with_install=True, subcloud_list=subcloud_list, local_only=True)
@@ -1178,6 +1171,7 @@ def test_restore_group_local_backup_active_load(request):
for subcloud_name in subcloud_list:
DcManagerSubcloudListKeywords(central_ssh).validate_subcloud_availability_status(subcloud_name)
@mark.p2
@mark.lab_has_subcloud
def test_verify_backup_restore_local_simplex_images(request):
@@ -1212,7 +1206,7 @@ def test_verify_backup_restore_local_simplex_images(request):
docker_config = ConfigurationManager.get_docker_config()
docker_img = "hello-world"
docker_tag = "latest"
local_registry = docker_config.get_registry("local_registry")
local_registry = docker_config.get_local_registry()
get_logger().log_test_case_step(f"Add custom docker image to {subcloud_name} registry.")
DockerImagesKeywords(subcloud_ssh).pull_image(f"{docker_img}:{docker_tag}")
DockerLoadImageKeywords(subcloud_ssh).tag_docker_image_for_registry(docker_img, docker_tag, local_registry)
@@ -1241,6 +1235,5 @@ def test_verify_backup_restore_local_simplex_images(request):
DcManagerSubcloudManagerKeywords(central_ssh).get_dcmanager_subcloud_unmanage(subcloud_name, 10)
dc_manager_backup.restore_subcloud_backup(subcloud_password, central_ssh, subcloud=subcloud_name, with_install=True, local_only=True, registry=True)
img_check = DockerImagesKeywords(subcloud_ssh).exists_image(local_registry, docker_img, docker_tag)
validate_equals(img_check, True, f"Validate that {docker_img} was restored with backup.")

View File

@@ -81,7 +81,7 @@ def deploy_docker_image_to_local_registry(ssh_connection: SSHConnection, secret_
None: This function does not return a value.
"""
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
get_logger().log_info(f"Deploy docker images to local registry to {local_registry}")
FileKeywords(ssh_connection).upload_file(get_stx_resource_path("resources/images/pause.tar"), "/home/sysadmin/pause.tar")

View File

@@ -486,6 +486,7 @@ def test_dc_alarm_aggregation_managed():
subcloud_alarm = next((alarm for alarm in subcloud_alarms if alarm.alarm_id == fm_client_cli_object.get_alarm_id()), None)
validate_none(subcloud_alarm, f"Alarm with ID {DEFAULT_ALARM_ID} should not be present in subcloud {subcloud_name}")
@mark.p0
@mark.lab_has_subcloud
def test_dc_install_custom_app():
@@ -1397,7 +1398,7 @@ def deploy_images_to_local_registry(ssh_connection: SSHConnection):
Args:
ssh_connection (SSHConnection): the SSH connection.
"""
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
file_keywords = FileKeywords(ssh_connection)
file_keywords.upload_file(get_stx_resource_path("resources/images/pv-test.tar"), "/home/sysadmin/pv-test.tar", overwrite=False)

View File

@@ -125,7 +125,7 @@ def test_push_docker_image_to_local_registry_standby(request):
ssh_connection = LabConnectionKeywords().get_active_controller_ssh()
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
FileKeywords(ssh_connection).upload_file(get_stx_resource_path("resources/images/busybox.tar"), "/home/sysadmin/busybox.tar", overwrite=False)
KubectlCreateSecretsKeywords(ssh_connection).create_secret_for_registry(local_registry, "local-secret")
@@ -505,7 +505,7 @@ def deploy_images_to_local_registry(ssh_connection: SSHConnection):
ssh_connection (SSHConnection): the ssh connection
"""
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
docker_load_image_keywords = DockerLoadImageKeywords(ssh_connection)
FileKeywords(ssh_connection).upload_file(get_stx_resource_path("resources/images/resource-consumer.tar"), "/home/sysadmin/resource-consumer.tar", overwrite=False)
@@ -703,7 +703,7 @@ def test_isolated_2processors_2big_pods_best_effort_simplex(request):
get_logger().log_info("Validated the cpu-manager-policy and topology-manager-policy from the kubelet command line.")
# Upload Docker image to local registry
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
KubectlCreateSecretsKeywords(ssh_connection).create_secret_for_registry(local_registry, "local-secret")
file_keywords = FileKeywords(ssh_connection)
@@ -951,7 +951,7 @@ def test_isolated_2processors_2big_pods_best_effort_standby_controller(request):
get_logger().log_info("Validated the cpu-manager-policy and topology-manager-policy from the kubelet command line.")
# Upload Docker image to local registry
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
KubectlCreateSecretsKeywords(standby_controller_ssh).create_secret_for_registry(local_registry, "local-secret")
file_keywords = FileKeywords(standby_controller_ssh)
@@ -1474,7 +1474,7 @@ def sriov_deploy_images_to_local_registry(ssh_connection: SSHConnection):
ssh_connection (SSHConnection): the ssh connection
"""
local_registry = ConfigurationManager.get_docker_config().get_registry("local_registry")
local_registry = ConfigurationManager.get_docker_config().get_local_registry()
file_keywords = FileKeywords(ssh_connection)
file_keywords.upload_file(get_stx_resource_path("resources/images/pv-test.tar"), "/home/sysadmin/pv-test.tar", overwrite=False)