---
# YOLO Annotator Configuration - CPU Only (ONNX)
#
# This configuration uses ONNX Runtime for CPU-only inference.
# No GPU required - works on any system.
#
# Usage:
#   python scripts/annotate.py --config configs/annotator_cpu.yaml

model:
  path: "models/yolov9t.onnx"     # ONNX model file
  device: "cpu"                   # cpu (ONNX uses CPU by default)
  backend: "onnx"                 # Force ONNX backend
  conf_threshold: 0.25            # Confidence threshold
  iou_threshold: 0.45             # NMS IoU threshold

  # ONNX specific options
  onnx:
    num_threads: 0                # CPU threads (0 = auto)
    optimization_level: "all"     # Graph optimization level

video:
  source: "input/video.mp4"       # Video file path
  sample_fps: 2                   # Frames per second to extract
  max_frames: null                # Max frames (null = all)
  start_time: 0                   # Start time in seconds
  end_time: null                  # End time (null = end of video)
  resize: null                    # [width, height] or null

detection:
  classes: null                   # Class IDs to keep (null = all)
  min_confidence: 0.3             # Minimum confidence to save
  min_area: 100                   # Minimum bbox area in pixels
  max_area: null                  # Maximum bbox area (null = no limit)
  min_size: 0.01                  # Minimum bbox dimension (normalized)

output:
  directory: "output/annotations" # Output directory
  save_snapshots: true            # Save clean images
  save_labels: true               # Save YOLO labels
  save_debug: true                # Save debug visualizations
  save_manifest: true             # Save JSON manifest
  image_format: "jpg"             # jpg or png
  image_quality: 95               # JPEG quality (1-100)

# Class names (COCO subset - common objects)
class_names:
  0: person
  1: bicycle
  2: car
  3: motorcycle
  4: airplane
  5: bus
  6: train
  7: truck
  8: boat
  14: bird
  15: cat
  16: dog
  17: horse
  18: sheep
  19: cow