Remove LB_TIMEOUT in favor of lb_build_timeout
Using the CONF.kuryr_kubernetes.lb_build_timeout config option instead of the LB_TIMEOUT constant allows modifying the LB creation timeout for different environments, where the LB creation time can vary significantly. Remove LB_RECONCILE_TIMEOUT as well, in favor of a new config option CONF.kuryr_kubernetes.lb_reconcile_timeout. Change-Id: Id1eb72c294abd6a2e5111bd0c7f97e311969b698
This commit is contained in:
parent
c62a4a8be4
commit
8501f18e61
@ -104,6 +104,9 @@ kuryr_k8s_opts = [
|
||||
" number LB members"),
|
||||
cfg.BoolOpt("enable_reconciliation", default=False,
|
||||
help="Whether or not reconciliation is enabled"),
|
||||
cfg.IntOpt("lb_reconcile_timeout", default=600,
|
||||
help="The max time (in seconds) it should take for LB "
|
||||
"reconciliation. It doesn't include the LB build time."),
|
||||
cfg.BoolOpt("trigger_namespace_upon_pod", default=False,
|
||||
help="Whether or not Namespace should be handled upon Pod "
|
||||
"creation"),
|
||||
|
@ -526,7 +526,7 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
|
||||
def get_svc_ip_on_annotation(cls, service_name, namespace):
|
||||
api = cls.k8s_client.CoreV1Api()
|
||||
start = time.time()
|
||||
while time.time() - start < consts.LB_TIMEOUT:
|
||||
while time.time() - start < CONF.kuryr_kubernetes.lb_build_timeout:
|
||||
time.sleep(5)
|
||||
service = api.read_namespaced_service(service_name, namespace)
|
||||
if service.status.load_balancer.ingress:
|
||||
@ -561,7 +561,7 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
|
||||
def get_svc_ip_on_crd(cls, service_name, namespace):
|
||||
api = cls.k8s_client.CoreV1Api()
|
||||
start = time.time()
|
||||
while time.time() - start < consts.LB_TIMEOUT:
|
||||
while time.time() - start < CONF.kuryr_kubernetes.lb_build_timeout:
|
||||
time.sleep(5)
|
||||
service = api.read_namespaced_service(service_name, namespace)
|
||||
if service.status.load_balancer.ingress:
|
||||
@ -1153,7 +1153,8 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
|
||||
|
||||
@classmethod
|
||||
def _verify_klb_crd(cls, name, poll_interval=1, namespace='default',
|
||||
timeout_period=consts.LB_TIMEOUT, pod_num=None):
|
||||
timeout_period=CONF.kuryr_kubernetes.lb_build_timeout,
|
||||
pod_num=None):
|
||||
start = time.time()
|
||||
klb_crd_has_status = False
|
||||
while time.time() - start < timeout_period:
|
||||
@ -1177,10 +1178,10 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
|
||||
raise lib_exc.TimeoutException(msg)
|
||||
|
||||
@classmethod
|
||||
def _verify_endpoints_annotation(cls, ep_name, ann_string,
|
||||
poll_interval=1, namespace='default',
|
||||
pod_num=None,
|
||||
timeout_period=consts.LB_TIMEOUT):
|
||||
def _verify_endpoints_annotation(
|
||||
cls, ep_name, ann_string, poll_interval=1, namespace='default',
|
||||
pod_num=None,
|
||||
timeout_period=CONF.kuryr_kubernetes.lb_build_timeout):
|
||||
LOG.info("Look for %s string in ep=%s annotation ",
|
||||
ann_string, ep_name)
|
||||
|
||||
@ -1503,7 +1504,7 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
|
||||
if CONF.kuryr_kubernetes.kuryrloadbalancers:
|
||||
klb_crd_id = self.get_klb_crd_id(service_name, namespace)
|
||||
start = time.time()
|
||||
while time.time() - start < consts.LB_TIMEOUT:
|
||||
while time.time() - start < CONF.kuryr_kubernetes.lb_build_timeout:
|
||||
try:
|
||||
lb_status = self.lbaas.get_loadbalancer_status(
|
||||
klb_crd_id)
|
||||
@ -1549,7 +1550,7 @@ class BaseKuryrScenarioTest(manager.NetworkScenarioTest):
|
||||
namespace=namespace)
|
||||
annotated_values = [value for i, value in annotation.items()]
|
||||
start = time.time()
|
||||
while time.time() - start < consts.LB_TIMEOUT:
|
||||
while time.time() - start < CONF.kuryr_kubernetes.lb_build_timeout:
|
||||
time.sleep(5)
|
||||
timeout_cli, timeout_mem = self.get_listener_timeout_on_crd(
|
||||
service_name=updated_service.metadata.name,
|
||||
|
@ -20,5 +20,3 @@ POD_AFFINITY = {'requiredDuringSchedulingIgnoredDuringExecution': [
|
||||
TIME_TO_APPLY_SGS = 30
|
||||
POD_STATUS_RETRIES = 240
|
||||
NS_TIMEOUT = 600
|
||||
LB_TIMEOUT = 1200
|
||||
LB_RECONCILE_TIMEOUT = 600
|
||||
|
@ -21,7 +21,6 @@ from tempest.lib import decorators
|
||||
from tempest.lib import exceptions as lib_exc
|
||||
|
||||
from kuryr_tempest_plugin.tests.scenario import base
|
||||
from kuryr_tempest_plugin.tests.scenario import consts
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = config.CONF
|
||||
@ -301,7 +300,7 @@ class TestLoadBalancerReconciliationScenario(base.BaseKuryrScenarioTest):
|
||||
self.lbaas.delete_loadbalancer(klb_crd_id, cascade=True)
|
||||
LOG.debug("Waiting for loadbalancer to be completely gone")
|
||||
start = time.time()
|
||||
while time.time() - start < consts.LB_TIMEOUT:
|
||||
while time.time() - start < CONF.kuryr_kubernetes.lb_build_timeout:
|
||||
try:
|
||||
time.sleep(30)
|
||||
self.lbaas.show_loadbalancer(klb_crd_id)
|
||||
@ -313,7 +312,8 @@ class TestLoadBalancerReconciliationScenario(base.BaseKuryrScenarioTest):
|
||||
" deleted", klb_crd_id)
|
||||
raise lib_exc.TimeoutException(msg)
|
||||
start = time.time()
|
||||
timeout = consts.LB_RECONCILE_TIMEOUT + consts.LB_TIMEOUT
|
||||
timeout = CONF.kuryr_kubernetes.lb_reconcile_timeout + \
|
||||
CONF.kuryr_kubernetes.lb_build_timeout
|
||||
# We need to add both timeouts to wait for the time for both rebuilding
|
||||
# and reconciliation of the LoadBalancer
|
||||
while time.time() - start < timeout:
|
||||
|
Loading…
Reference in New Issue
Block a user