Refactor: Logging Cleanup

This commit is contained in:
ziesorx 2025-09-24 20:39:32 +07:00
parent 7a9a149955
commit 5176f99ba7
9 changed files with 37 additions and 72 deletions

View file

@@ -400,7 +400,33 @@ core/
- [ ] Test stream interruption handling
- [ ] Test concurrent subscription management
-### 6.5 Final Cleanup
+### 6.5 Logging Optimization & Cleanup ✅
- ✅ **Removed Debug Frame Saving**
- ✅ Removed hard-coded debug frame saving in `core/detection/pipeline.py`
- ✅ Removed hard-coded debug frame saving in `core/detection/branches.py`
- ✅ Eliminated absolute debug paths for production use
- ✅ **Eliminated Test/Mock Functionality**
- ✅ Removed `save_frame_for_testing` function from `core/streaming/buffers.py`
- ✅ Removed `save_test_frames` configuration from `StreamConfig`
- ✅ Cleaned up test frame saving calls in stream manager
- ✅ Updated module exports to remove test functions
- ✅ **Reduced Verbose Logging**
- ✅ Commented out verbose frame storage logging (every frame)
- ✅ Converted debug-level info logs to proper debug level
- ✅ Reduced repetitive frame dimension logging
- ✅ Maintained important model results and detection confidence logging
- ✅ Kept critical pipeline execution and error messages
- ✅ **Production-Ready Logging**
- ✅ Clean startup and initialization messages
- ✅ Clear model loading and pipeline status
- ✅ Preserved detection results with confidence scores
- ✅ Maintained session management and tracking messages
- ✅ Kept important error and warning messages
### 6.6 Final Cleanup
- [ ] Remove any remaining duplicate code
- [ ] Optimize imports across all modules
- [ ] Clean up temporary files and debugging code

View file

@@ -393,7 +393,6 @@ class WebSocketHandler:
snapshot_url=payload.get('snapshotUrl'),
snapshot_interval=payload.get('snapshotInterval', 5000),
max_retries=3,
save_test_frames=False # Disable frame saving, focus on tracking
)
# Add subscription to StreamManager with tracking

View file

@@ -441,19 +441,6 @@ class BranchProcessor:
logger.info(f"[INFERENCE START] {branch_id}: Running inference on {'cropped' if input_frame is not frame else 'full'} frame "
            f"({input_frame.shape[1]}x{input_frame.shape[0]}) with confidence={min_confidence}")
# Save input frame for debugging
import os
import cv2
debug_dir = "/Users/ziesorx/Documents/Work/Adsist/Bangchak/worker/python-detector-worker/debug_frames"
timestamp = detection_context.get('timestamp', 'unknown')
session_id = detection_context.get('session_id', 'unknown')
debug_filename = f"{debug_dir}/{branch_id}_{session_id}_{timestamp}_input.jpg"
try:
cv2.imwrite(debug_filename, input_frame)
logger.info(f"[DEBUG] Saved inference input frame: {debug_filename} ({input_frame.shape[1]}x{input_frame.shape[0]})")
except Exception as e:
logger.warning(f"[DEBUG] Failed to save debug frame: {e}")
# Use .predict() method for both detection and classification models
inference_start = time.time()

View file

@@ -503,17 +503,6 @@ class DetectionPipeline:
'filename': f"{uuid.uuid4()}.jpg"
}
# Save full frame for debugging
import cv2
debug_dir = "/Users/ziesorx/Documents/Work/Adsist/Bangchak/worker/python-detector-worker/debug_frames"
timestamp = detection_context.get('timestamp', 'unknown')
session_id = detection_context.get('session_id', 'unknown')
debug_filename = f"{debug_dir}/pipeline_full_frame_{session_id}_{timestamp}.jpg"
try:
cv2.imwrite(debug_filename, frame)
logger.info(f"[DEBUG PIPELINE] Saved full input frame: {debug_filename} ({frame.shape[1]}x{frame.shape[0]})")
except Exception as e:
logger.warning(f"[DEBUG PIPELINE] Failed to save debug frame: {e}")
# Run inference on single snapshot using .predict() method
detection_results = self.detection_model.model.predict(

View file

@@ -3,7 +3,7 @@ Streaming system for RTSP and HTTP camera feeds.
Provides modular frame readers, buffers, and stream management.
"""
from .readers import RTSPReader, HTTPSnapshotReader
-from .buffers import FrameBuffer, CacheBuffer, shared_frame_buffer, shared_cache_buffer, save_frame_for_testing
+from .buffers import FrameBuffer, CacheBuffer, shared_frame_buffer, shared_cache_buffer
from .manager import StreamManager, StreamConfig, SubscriptionInfo, shared_stream_manager
__all__ = [
@@ -16,7 +16,6 @@ __all__ = [
'CacheBuffer',
'shared_frame_buffer',
'shared_cache_buffer',
'save_frame_for_testing',
# Manager
'StreamManager',

View file

@@ -67,8 +67,9 @@ class FrameBuffer:
'size_mb': frame.nbytes / (1024 * 1024)
}
-logger.debug(f"Stored {stream_type.value} frame for camera {camera_id}: "
-             f"{frame.shape[1]}x{frame.shape[0]}, {frame.nbytes / (1024 * 1024):.2f}MB")
+# Commented out verbose frame storage logging
+# logger.debug(f"Stored {stream_type.value} frame for camera {camera_id}: "
+#              f"{frame.shape[1]}x{frame.shape[0]}, {frame.nbytes / (1024 * 1024):.2f}MB")
def get_frame(self, camera_id: str) -> Optional[np.ndarray]:
"""Get the latest frame for the given camera ID."""
@@ -400,31 +401,3 @@ shared_frame_buffer = FrameBuffer(max_age_seconds=5)
shared_cache_buffer = CacheBuffer(max_age_seconds=10)
def save_frame_for_testing(camera_id: str, frame: np.ndarray, test_dir: str = "test_frames"):
"""Save frame to test directory for verification purposes."""
import os
try:
os.makedirs(test_dir, exist_ok=True)
timestamp = int(time.time() * 1000) # milliseconds
filename = f"{camera_id}_{timestamp}.jpg"
filepath = os.path.join(test_dir, filename)
# Use appropriate quality based on frame size
h, w = frame.shape[:2]
if w >= 2000: # High resolution
quality = 95
else: # Standard resolution
quality = 90
encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
success = cv2.imwrite(filepath, frame, encode_params)
if success:
size_kb = os.path.getsize(filepath) / 1024
logger.info(f"Saved test frame: {filepath} ({w}x{h}, {size_kb:.1f}KB)")
else:
logger.error(f"Failed to save test frame: {filepath}")
except Exception as e:
logger.error(f"Error saving test frame for camera {camera_id}: {e}")

View file

@@ -10,7 +10,7 @@ from dataclasses import dataclass
from collections import defaultdict
from .readers import RTSPReader, HTTPSnapshotReader
-from .buffers import shared_cache_buffer, save_frame_for_testing, StreamType
+from .buffers import shared_cache_buffer, StreamType
from ..tracking.integration import TrackingPipelineIntegration
@@ -25,7 +25,6 @@ class StreamConfig:
snapshot_url: Optional[str] = None
snapshot_interval: int = 5000 # milliseconds
max_retries: int = 3
save_test_frames: bool = False
@dataclass
@@ -184,13 +183,6 @@ class StreamManager:
# Store frame in shared buffer with stream type
shared_cache_buffer.put_frame(camera_id, frame, stream_type)
# Save test frames if enabled for any subscription
with self._lock:
for subscription_id in self._camera_subscribers[camera_id]:
subscription_info = self._subscriptions[subscription_id]
if subscription_info.stream_config.save_test_frames:
save_frame_for_testing(camera_id, frame)
break # Only save once per frame
# Process tracking for subscriptions with tracking integration
self._process_tracking_for_camera(camera_id, frame)
@@ -349,7 +341,6 @@ class StreamManager:
snapshot_url=payload.get('snapshotUrl'),
snapshot_interval=payload.get('snapshotInterval', 5000),
max_retries=3,
max_retries=3, max_retries=3,
save_test_frames=True # Enable for testing
)
return self.add_subscription(

View file

@@ -200,11 +200,11 @@ class TrackingPipelineIntegration:
raw_detections = len(tracking_results.detections)
if raw_detections > 0:
class_names = [detection.class_name for detection in tracking_results.detections]
-logger.info(f"[DEBUG] Raw detections: {raw_detections}, classes: {class_names}")
+logger.debug(f"Raw detections: {raw_detections}, classes: {class_names}")
else:
-logger.debug(f"[DEBUG] No raw detections found")
+logger.debug(f"No raw detections found")
else:
-logger.debug(f"[DEBUG] No tracking results or detections attribute")
+logger.debug(f"No tracking results or detections attribute")
# Process tracking results
tracked_vehicles = self.tracker.process_detections(

View file

@@ -73,7 +73,8 @@ class StableCarValidator:
"""Update frame dimensions for zone calculations."""
self.frame_width = width
self.frame_height = height
-logger.debug(f"Updated frame dimensions: {width}x{height}")
+# Commented out verbose frame dimension logging
+# logger.debug(f"Updated frame dimensions: {width}x{height}")
def validate_vehicle(self, vehicle: TrackedVehicle, frame_shape: Optional[Tuple] = None) -> ValidationResult:
"""