py3: nfv use floor division operator
The "/" operator with integers as both left-hand and right-hand
expressions performs floor division in python2 but real (true)
division in python3.
Use "//" instead to perform floor division under both python
versions, preserving the original python2 behavior.
This commit is split from Charles Short's work
Iecfe9b83245847b07aaaa0ec1aadad4734a9d118.
Story: 2008454
Task: 42727
Co-authored-by: Charles Short <charles.short@windriver.com>
Signed-off-by: Dan Voiculeasa <dan.voiculeasa@windriver.com>
Change-Id: I69ddfc4ba17d43158f1897fe291751210fb3d1f9
(cherry picked from commit f9149780fb)
This commit is contained in:
parent
a62dc15fee
commit
8acf167a69
@ -55,7 +55,7 @@ class Histogram(object):
|
||||
|
||||
self._sample_total += sample_as_int
|
||||
self._num_samples += 1
|
||||
self._average_sample = (self._sample_total / self._num_samples)
|
||||
self._average_sample = (self._sample_total // self._num_samples)
|
||||
|
||||
self._buckets[bucket_idx] += 1
|
||||
|
||||
|
@ -107,7 +107,7 @@ def selobj_dispatch(timeout_in_ms):
|
||||
_read_callbacks.pop(selobj)
|
||||
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
|
||||
histogram.add_histogram_data("selobj read: " + callback.__name__,
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
|
||||
for selobj in writeable:
|
||||
callback = _write_callbacks.get(selobj, None)
|
||||
@ -119,7 +119,7 @@ def selobj_dispatch(timeout_in_ms):
|
||||
_write_callbacks.pop(selobj)
|
||||
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
|
||||
histogram.add_histogram_data("selobj write: " + callback.__name__,
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
|
||||
for selobj in in_error:
|
||||
callback = _error_callbacks.get(selobj, None)
|
||||
@ -131,7 +131,7 @@ def selobj_dispatch(timeout_in_ms):
|
||||
_error_callbacks.pop(selobj)
|
||||
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
|
||||
histogram.add_histogram_data("selobj error: " + callback.__name__,
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
|
||||
if selobj in list(_read_callbacks):
|
||||
_read_callbacks.pop(selobj)
|
||||
|
@ -179,7 +179,7 @@ class StrategyPhase(object):
|
||||
if 0 == total_steps:
|
||||
return 100
|
||||
|
||||
return int((completed_steps * 100) / total_steps)
|
||||
return int((completed_steps * 100) // total_steps)
|
||||
|
||||
def is_inprogress(self):
|
||||
"""
|
||||
|
@ -89,7 +89,7 @@ class TaskWorkerThread(thread.Thread):
|
||||
result.ancillary_result_data.execution_time, 'secs')
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - result.create_timestamp_ms) / 1000
|
||||
elapsed_secs = (now_ms - result.create_timestamp_ms) // 1000
|
||||
histogram.add_histogram_data(result.name + ' [execution-time]',
|
||||
elapsed_secs, 'secs')
|
||||
|
||||
|
@ -85,7 +85,7 @@ def _task_coroutine_with_timer(future, arg1, callback):
|
||||
if future.result.is_complete():
|
||||
if future.result.is_timer:
|
||||
if future.result.data == timer_id:
|
||||
elapsed_secs = (end_ms - start_ms) / 1000
|
||||
elapsed_secs = (end_ms - start_ms) // 1000
|
||||
if 2 < elapsed_secs:
|
||||
callback.send("FUNCTION PASSED")
|
||||
return
|
||||
|
@ -238,7 +238,7 @@ class TCPConnection(object):
|
||||
pass
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - start_ms) / 1000
|
||||
secs_expired = (now_ms - start_ms) // 1000
|
||||
if timeout_in_secs <= secs_expired:
|
||||
DLOG.info("Timed out waiting for a message.")
|
||||
break
|
||||
|
@ -78,7 +78,7 @@ class Thread(object):
|
||||
"""
|
||||
if self._stall_timestamp_ms is not None:
|
||||
now = timers.get_monotonic_timestamp_in_ms()
|
||||
return int((now - self._stall_timestamp_ms) / 1000)
|
||||
return int((now - self._stall_timestamp_ms) // 1000)
|
||||
return 0
|
||||
|
||||
@coroutine
|
||||
|
@ -60,7 +60,7 @@ class Timer(object):
|
||||
time has elapsed
|
||||
"""
|
||||
rearm = True
|
||||
secs_expired = (now_ms - self._arm_timestamp) / 1000
|
||||
secs_expired = (now_ms - self._arm_timestamp) // 1000
|
||||
if secs_expired > self._next_expiry_in_secs:
|
||||
DLOG.verbose("Timer %s with timer id %s fired." % (self._timer_name,
|
||||
self._timer_id))
|
||||
|
@ -75,7 +75,7 @@ class TimerScheduler(object):
|
||||
rearm = timer.callback(now_ms)
|
||||
elapsed_ms = get_monotonic_timestamp_in_ms() - start_ms
|
||||
histogram.add_histogram_data("timer callback: " + timer.timer_name,
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
if not rearm and timer.timer_id not in self._timers_to_delete:
|
||||
self._timers_to_delete.append(timer.timer_id)
|
||||
finally:
|
||||
@ -89,7 +89,7 @@ class TimerScheduler(object):
|
||||
|
||||
elapsed_ms = get_monotonic_timestamp_in_ms() - overall_start_ms
|
||||
histogram.add_histogram_data("timer overall time per dispatch: ",
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
|
||||
def add_timer(self, timer):
|
||||
"""
|
||||
|
@ -3433,7 +3433,7 @@ class NFVIComputeAPI(nfvi.api.v1.NFVIComputeAPI):
|
||||
self._rest_api_server.add_handler(
|
||||
'POST', '/v2.1/*', self.instance_action_rest_api_post_handler)
|
||||
|
||||
interval_secs = max(self._max_action_request_wait_in_secs / 2, 1)
|
||||
interval_secs = max(self._max_action_request_wait_in_secs // 2, 1)
|
||||
timers.timers_create_timer('compute-api-action-requests-audit',
|
||||
interval_secs, interval_secs,
|
||||
self._audit_action_requests)
|
||||
|
@ -338,7 +338,7 @@ def _rest_api_request(token_id, method, api_cmd, api_cmd_headers,
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_ms = now_ms - start_ms
|
||||
elapsed_secs = elapsed_ms / 1000
|
||||
elapsed_secs = elapsed_ms // 1000
|
||||
|
||||
DLOG.verbose("Rest-API code=%s, headers=%s, response=%s"
|
||||
% (request.code, headers, response))
|
||||
|
@ -96,7 +96,7 @@ class Database(object):
|
||||
self._session.commit()
|
||||
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
|
||||
histogram.add_histogram_data("database-commits (periodic)",
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
self._commit_timer_id = None
|
||||
|
||||
def commit(self):
|
||||
@ -105,7 +105,7 @@ class Database(object):
|
||||
self._session.commit()
|
||||
elapsed_ms = timers.get_monotonic_timestamp_in_ms() - start_ms
|
||||
histogram.add_histogram_data("database-commits (inline)",
|
||||
elapsed_ms / 100, "decisecond")
|
||||
elapsed_ms // 100, "decisecond")
|
||||
else:
|
||||
if self._commit_timer_id is None:
|
||||
self._commit_timer_id \
|
||||
|
@ -72,7 +72,7 @@ def _system_state_query_callback():
|
||||
dor_complete_hosts += 1
|
||||
|
||||
if 0 < total_hosts:
|
||||
completion_percentage = (dor_complete_hosts * 100 / total_hosts)
|
||||
completion_percentage = (dor_complete_hosts * 100 // total_hosts)
|
||||
else:
|
||||
completion_percentage = 0
|
||||
|
||||
@ -150,7 +150,7 @@ def _dor_timer():
|
||||
break
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - _process_start_timestamp_ms) / 1000
|
||||
elapsed_secs = (now_ms - _process_start_timestamp_ms) // 1000
|
||||
|
||||
if not _dor_stabilized and elapsed_secs > _dor_stabilize_uptime:
|
||||
_dor_stabilized = True
|
||||
|
@ -78,7 +78,7 @@ class DeletingState(state_machine.State):
|
||||
host.fsm_start_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - host.fsm_start_time) / 1000
|
||||
secs_expired = (now_ms - host.fsm_start_time) // 1000
|
||||
|
||||
if max_wait > secs_expired:
|
||||
if not host.task.inprogress():
|
||||
|
@ -1098,7 +1098,7 @@ class WaitHostServicesDisabledTaskWork(state_machine.StateTaskWork):
|
||||
if self._service == objects.HOST_SERVICES.CONTAINER:
|
||||
if not self._query_inprogress:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - self._start_timestamp) / 1000
|
||||
elapsed_secs = (now_ms - self._start_timestamp) // 1000
|
||||
# Wait 10s before doing our first query
|
||||
if 10 <= elapsed_secs:
|
||||
DLOG.verbose("Wait-Host-Services-Disabled for %s for "
|
||||
@ -1111,7 +1111,7 @@ class WaitHostServicesDisabledTaskWork(state_machine.StateTaskWork):
|
||||
|
||||
elif HOST_EVENT.AUDIT == event:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - self._host.disable_extend_timestamp) / 1000
|
||||
elapsed_secs = (now_ms - self._host.disable_extend_timestamp) // 1000
|
||||
if 120 <= elapsed_secs:
|
||||
nfvi.nfvi_notify_host_services_disable_extend(
|
||||
self._host.uuid, self._host.name, self._extend_callback())
|
||||
@ -1661,7 +1661,7 @@ class NotifyInstancesHostDisablingTaskWork(state_machine.StateTaskWork):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - self._host.disable_extend_timestamp) / 1000
|
||||
elapsed_secs = (now_ms - self._host.disable_extend_timestamp) // 1000
|
||||
if 120 <= elapsed_secs:
|
||||
nfvi.nfvi_notify_host_services_disable_extend(
|
||||
self._host.uuid, self._host.name, self._callback())
|
||||
@ -1819,7 +1819,7 @@ class NotifyInstancesHostDisabledTaskWork(state_machine.StateTaskWork):
|
||||
|
||||
elif HOST_EVENT.AUDIT == event:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - self._host.disable_extend_timestamp) / 1000
|
||||
elapsed_secs = (now_ms - self._host.disable_extend_timestamp) // 1000
|
||||
if 120 <= elapsed_secs:
|
||||
nfvi.nfvi_notify_host_services_disable_extend(
|
||||
self._host.uuid, self._host.name, self._callback())
|
||||
|
@ -100,7 +100,7 @@ class ColdMigrateState(state_machine.State):
|
||||
= timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.wait_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.wait_time) // 1000
|
||||
if 60 <= secs_expired:
|
||||
instance.fail_action(instance.action_fsm_action_type, 'timeout')
|
||||
instance_director.instance_evacuate_complete(
|
||||
@ -110,7 +110,7 @@ class ColdMigrateState(state_machine.State):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) // 1000
|
||||
if instance.max_cold_migrate_wait_in_secs <= secs_expired:
|
||||
instance.fail_action(instance.action_fsm_action_type, 'timeout')
|
||||
instance_director.instance_migrate_complete(
|
||||
|
@ -122,7 +122,7 @@ class EvacuateState(state_machine.State):
|
||||
= timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.wait_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.wait_time) // 1000
|
||||
if 120 <= secs_expired:
|
||||
instance.fail_action(instance.action_fsm_action_type, 'timeout')
|
||||
instance_director.instance_evacuate_complete(
|
||||
@ -132,7 +132,7 @@ class EvacuateState(state_machine.State):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) // 1000
|
||||
if instance.max_evacuate_wait_in_secs <= secs_expired:
|
||||
instance.fail_action(instance.action_fsm_action_type, 'timeout')
|
||||
instance_director.instance_evacuate_complete(
|
||||
|
@ -150,7 +150,7 @@ class LiveMigrateState(state_machine.State):
|
||||
= timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.wait_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.wait_time) // 1000
|
||||
if 60 <= secs_expired:
|
||||
instance.fail_action(instance.action_fsm_action_type, 'timeout')
|
||||
instance_director.instance_migrate_complete(
|
||||
@ -160,7 +160,7 @@ class LiveMigrateState(state_machine.State):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) // 1000
|
||||
max_live_migrate_wait_in_secs = \
|
||||
instance.max_live_migrate_wait_in_secs
|
||||
if 0 != max_live_migrate_wait_in_secs:
|
||||
|
@ -99,7 +99,7 @@ class StartState(state_machine.State):
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = \
|
||||
(now_ms - instance.action_fsm.wait_time) / 1000
|
||||
(now_ms - instance.action_fsm.wait_time) // 1000
|
||||
# Only wait 60 seconds for the instance to start.
|
||||
if 60 <= secs_expired:
|
||||
instance.fail_action(instance.action_fsm_action_type,
|
||||
|
@ -100,7 +100,7 @@ class StopState(state_machine.State):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) / 1000
|
||||
secs_expired = (now_ms - instance.action_fsm.start_time) // 1000
|
||||
# Wait up to 5 minutes for the VM to stop
|
||||
max_wait = 300
|
||||
if max_wait <= secs_expired or instance.task.timed_out():
|
||||
|
@ -261,7 +261,7 @@ class Host(ObjectData):
|
||||
|
||||
if 0 != self._last_state_timestamp:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._last_state_timestamp) / 1000
|
||||
secs_expired = (now_ms - self._last_state_timestamp) // 1000
|
||||
elapsed_time_in_state += int(secs_expired)
|
||||
|
||||
return elapsed_time_in_state
|
||||
@ -703,7 +703,7 @@ class Host(ObjectData):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._last_state_timestamp) / 1000
|
||||
secs_expired = (now_ms - self._last_state_timestamp) // 1000
|
||||
if 30 <= secs_expired:
|
||||
if 0 != self._last_state_timestamp:
|
||||
self._elapsed_time_in_state += int(secs_expired)
|
||||
|
@ -1071,7 +1071,7 @@ class Instance(ObjectData):
|
||||
|
||||
if 0 != self._last_state_timestamp:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._last_state_timestamp) / 1000
|
||||
secs_expired = (now_ms - self._last_state_timestamp) // 1000
|
||||
elapsed_time_in_state += int(secs_expired)
|
||||
|
||||
return elapsed_time_in_state
|
||||
@ -2615,7 +2615,7 @@ class Instance(ObjectData):
|
||||
|
||||
else:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._last_state_timestamp) / 1000
|
||||
secs_expired = (now_ms - self._last_state_timestamp) // 1000
|
||||
if 15 <= secs_expired:
|
||||
if 0 != self._last_state_timestamp:
|
||||
self._elapsed_time_in_state += int(secs_expired)
|
||||
|
@ -289,7 +289,7 @@ class UnlockHostsStep(AbstractHostsStrategyStep):
|
||||
# See if we have requested a retry and are not currently retrying
|
||||
if self._retry_requested:
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
if self._retry_delay <= secs_expired:
|
||||
self._retry_requested = False
|
||||
# re-issue unlock for all hosts.
|
||||
@ -419,7 +419,7 @@ class LockHostsStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
if 15 >= secs_expired:
|
||||
return True
|
||||
|
||||
@ -517,7 +517,7 @@ class RebootHostsStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
if 60 <= secs_expired:
|
||||
# Wait 60 seconds, which should be enough time for the host
|
||||
# to shutdown and reboot. No need to wait for the host to
|
||||
@ -606,7 +606,7 @@ class SwactHostsStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
if 120 <= secs_expired:
|
||||
result = strategy.STRATEGY_STEP_RESULT.SUCCESS
|
||||
self.stage.step_complete(result, '')
|
||||
@ -845,7 +845,7 @@ class UpgradeHostsStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 2 minutes for the host to go offline before
|
||||
# checking whether the upgrade is complete.
|
||||
if 120 <= secs_expired:
|
||||
@ -969,7 +969,7 @@ class UpgradeStartStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 60 seconds before checking upgrade for first time
|
||||
if 60 <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
@ -1077,7 +1077,7 @@ class UpgradeActivateStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 60 seconds before checking upgrade for first time
|
||||
if 60 <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
@ -1182,7 +1182,7 @@ class UpgradeCompleteStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 60 seconds before checking upgrade for first time
|
||||
if 60 <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
@ -1784,7 +1784,7 @@ class WaitDataSyncStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 120 seconds before checking alarms for first time
|
||||
if 120 <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
@ -1890,7 +1890,7 @@ class WaitAlarmsClearStep(strategy.StrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait before checking alarms for first time
|
||||
if self._first_query_delay_in_secs <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
@ -2265,7 +2265,7 @@ class FwUpdateHostsStep(strategy.StrategyStep):
|
||||
if 0 == self._wait_time:
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
if 60 <= secs_expired:
|
||||
# force timer reload on next audit
|
||||
self._wait_time = 0
|
||||
@ -2942,7 +2942,7 @@ class AbstractKubeUpgradeStep(AbstractStrategyStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 60 seconds before checking upgrade for first time
|
||||
if 60 <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
@ -3406,7 +3406,7 @@ class KubeHostUpgradeKubeletStep(AbstractKubeHostListUpgradeStep):
|
||||
self._wait_time = timers.get_monotonic_timestamp_in_ms()
|
||||
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - self._wait_time) / 1000
|
||||
secs_expired = (now_ms - self._wait_time) // 1000
|
||||
# Wait at least 60 seconds before checking upgrade for first time
|
||||
if 60 <= secs_expired and not self._query_inprogress:
|
||||
self._query_inprogress = True
|
||||
|
@ -198,9 +198,9 @@ def process_main():
|
||||
if not init_complete:
|
||||
# Retry initialization for up to 3 minutes.
|
||||
now_ms = timers.get_monotonic_timestamp_in_ms()
|
||||
secs_expired = (now_ms - process_start_time) / 1000
|
||||
secs_expired = (now_ms - process_start_time) // 1000
|
||||
if secs_expired < 180:
|
||||
time_since_init = (now_ms - last_init_time) / 1000
|
||||
time_since_init = (now_ms - last_init_time) // 1000
|
||||
# Reattempt initialization every 10 seconds.
|
||||
if time_since_init > 10:
|
||||
init_complete = process_reinitialize()
|
||||
|
@ -41,7 +41,7 @@ class Proxy(Application):
|
||||
start_ms = get_monotonic_timestamp_in_ms()
|
||||
result = self.proxy_app(environ, start_response)
|
||||
now_ms = get_monotonic_timestamp_in_ms()
|
||||
elapsed_secs = (now_ms - start_ms) / 1000
|
||||
elapsed_secs = (now_ms - start_ms) // 1000
|
||||
histogram.add_histogram_data("%s" % environ['HTTP_HOST'], elapsed_secs)
|
||||
if environ.get('REQUEST_METHOD') == 'POST':
|
||||
if 'os-keypairs' in environ.get('PATH_INFO', ''):
|
||||
|
@ -54,7 +54,7 @@ class Histogram(object):
|
||||
|
||||
self._sample_total += sample_as_int
|
||||
self._num_samples += 1
|
||||
self._average_sample = (self._sample_total / self._num_samples)
|
||||
self._average_sample = (self._sample_total // self._num_samples)
|
||||
|
||||
self._buckets[bucket_idx] += 1
|
||||
|
||||
|
Loading…
Reference in New Issue
Block a user