Exponential backoff for decode connection handling
parent 1432eb4b97
commit 42b08c6251

1 changed file with 17 additions and 10 deletions
@@ -380,23 +380,30 @@ class StreamDecoder:
         # Set the CUDA device for this thread
         torch.cuda.set_device(self.gpu_id)
 
-        retry_count = 0
-        max_retries = 5
+        consecutive_failures = 0
+        backoff_delay = 2.0  # Start with 2 seconds
+        max_backoff = 30.0  # Cap at 30 seconds
 
         while not self._stop_flag.is_set():
             # Initialize connection
             if not self._init_rtsp_connection():
-                retry_count += 1
-                if retry_count >= max_retries:
-                    print(f"Max retries reached for {self.rtsp_url}")
-                    self._set_status(ConnectionStatus.ERROR)
-                    break
+                consecutive_failures += 1
+
+                # Exponential backoff: 2s, 4s, 8s, 16s, 30s, 30s, ...
+                backoff_delay = min(
+                    2.0 * (2 ** (consecutive_failures - 1)), max_backoff
+                )
 
+                print(
+                    f"Connection failed for {self.rtsp_url}, retry in {backoff_delay:.0f}s (attempt {consecutive_failures})"
+                )
                 self._set_status(ConnectionStatus.RECONNECTING)
-                self._stop_flag.wait(timeout=2.0)
+                self._stop_flag.wait(timeout=backoff_delay)
                 continue
 
-            retry_count = 0  # Reset on successful connection
+            # Successfully connected - reset failure tracking
+            consecutive_failures = 0
+            backoff_delay = 2.0
 
             try:
                 # Decode loop - iterate through packets from PyAV
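For reference, a minimal standalone sketch (not part of the commit) of the delay schedule produced by the backoff formula in the hunk above, using the same constants (2.0 s base, 30.0 s cap):

# Illustrative only: reproduces the schedule computed by the new backoff code.
max_backoff = 30.0  # same cap as in the diff

for consecutive_failures in range(1, 8):
    backoff_delay = min(2.0 * (2 ** (consecutive_failures - 1)), max_backoff)
    print(f"attempt {consecutive_failures}: wait {backoff_delay:.0f}s")

# Prints 2s, 4s, 8s, 16s, then 30s for every further attempt.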
@@ -473,7 +480,7 @@ class StreamDecoder:
                 print(f"Error in decode loop for {self.rtsp_url}: {e}")
                 self._set_status(ConnectionStatus.RECONNECTING)
                 self._cleanup()
-                self._stop_flag.wait(timeout=2.0)
+                # Will retry connection at top of loop with exponential backoff
 
     def _cleanup(self):
         """Cleanup resources"""
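The backoff delay is spent in self._stop_flag.wait(timeout=backoff_delay) rather than time.sleep(). Assuming _stop_flag is a threading.Event (consistent with the .is_set() and .wait() calls in the diff), the wait returns as soon as the flag is set, so a decoder that is asked to stop does not sit out the remainder of a 30-second backoff. A small sketch of that behavior, with hypothetical names:

import threading
import time

stop_flag = threading.Event()  # stands in for self._stop_flag (assumed to be an Event)

def request_stop_later():
    time.sleep(1.0)
    stop_flag.set()  # shutdown requested while the backoff wait is in progress

threading.Thread(target=request_stop_later).start()

start = time.monotonic()
stop_flag.wait(timeout=30.0)  # same call pattern as the backoff wait in the diff
print(f"woke after {time.monotonic() - start:.1f}s")  # ~1.0s, not 30s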