PaddleX 摄像头实时目标检测低配优化版项目源码(基于 PP-YOLOE + ONNX + 多线程 + 简易跟踪,可打包)
# ==========================================================================
# ppyolo_realtime -- low-spec real-time camera object detection
#
# Reconstructed from a text-extraction-garbled blog listing: the original
# had every space, assignment (`=`, `+=`), comparison (`>`, `==`) and
# string quote stripped.  Pipeline: PP-YOLOE-s exported to ONNX, run with
# ONNX Runtime on CPU, frame skipping + a lightweight IOU tracker, and a
# capture / inference / display multi-thread loop.  Target: maximum FPS on
# low-end machines; packageable to a single EXE with PyInstaller.
#
# Technology choices (from the article):
#   detector : PaddleDetection -> PP-YOLOE-s (lightweight)
#   runtime  : ONNX Runtime, CPU provider
#   tracking : simplified IOU tracking (replaces heavyweight ByteTrack)
#   arch     : multi-threaded capture/inference/display via OpenCV
#
# Original project layout (the sections below are separate files):
#   ppyolo_realtime/
#   ├── models/
#   │   └── ppyoloe_s.onnx
#   ├── main.py
#   ├── detector.py
#   ├── tracker.py
#   ├── config.py
#   └── utils.py
#
# Step 1 -- export the ONNX model (shell, run inside PaddleDetection):
#   python tools/export_model.py \
#       -c configs/ppyoloe/ppyoloe_s_crn_l_300e_coco.yml \
#       --output_dir output_inference
#   pip install paddle2onnx
#   paddle2onnx --model_dir output_inference \
#       --model_filename model.pdmodel \
#       --params_filename model.pdiparams \
#       --save_file ppyoloe_s.onnx \
#       --opset_version 11
#
# Packaging to EXE with PyInstaller:
#   pyinstaller -F main.py

import queue
import threading
import time

import cv2                    # third-party: opencv-python
import numpy as np
import onnxruntime as ort     # third-party: onnxruntime

# --------------------------------------------------------------------------
# config.py -- tunable constants
# --------------------------------------------------------------------------
MODEL_PATH = "models/ppyoloe_s.onnx"
INPUT_SIZE = (640, 640)   # model input size (w, h)
CONF_THRESH = 0.5         # keep detections above this confidence
IOU_THRESH = 0.5          # reserved for NMS tuning
SKIP_FRAMES = 2           # run the detector only every SKIP_FRAMES frames
CAMERA_ID = 0             # cv2.VideoCapture device index


# --------------------------------------------------------------------------
# detector.py -- ONNX Runtime inference core
# --------------------------------------------------------------------------
class Detector:
    """PP-YOLOE-s detector running on the ONNX Runtime CPU provider."""

    def __init__(self):
        self.session = ort.InferenceSession(
            MODEL_PATH, providers=["CPUExecutionProvider"])
        self.input_name = self.session.get_inputs()[0].name

    def preprocess(self, frame):
        """Convert a BGR frame to a normalized NCHW float32 tensor."""
        img = cv2.resize(frame, INPUT_SIZE)
        img = img.astype(np.float32) / 255.0
        img = img.transpose(2, 0, 1)        # HWC -> CHW
        img = np.expand_dims(img, axis=0)   # add batch dimension
        return img

    def detect(self, frame):
        """Return [[x1, y1, x2, y2, score, cls], ...] above CONF_THRESH."""
        input_tensor = self.preprocess(frame)
        outputs = self.session.run(None, {self.input_name: input_tensor})
        # NOTE(review): output layout depends on how the model was exported;
        # adjust this parsing to the actual exported model's output format.
        boxes = outputs[0]
        results = []
        for box in boxes:
            x1, y1, x2, y2, score, cls = box
            if score > CONF_THRESH:
                results.append([x1, y1, x2, y2, score, int(cls)])
        return results


# --------------------------------------------------------------------------
# tracker.py -- lightweight tracking for low-spec machines
# --------------------------------------------------------------------------
class SimpleTracker:
    """Greedy IOU matcher -- a minimal stand-in for ByteTrack."""

    def __init__(self):
        self.tracks = []    # list of {"id": int, "box": [x1,y1,x2,y2,score,cls]}
        self.track_id = 0   # next id to assign

    def iou(self, box1, box2):
        """Intersection-over-union of two [x1, y1, x2, y2, ...] boxes."""
        x1 = max(box1[0], box2[0])
        y1 = max(box1[1], box2[1])
        x2 = min(box1[2], box2[2])
        y2 = min(box1[3], box2[3])
        inter = max(0, x2 - x1) * max(0, y2 - y1)
        area1 = (box1[2] - box1[0]) * (box1[3] - box1[1])
        area2 = (box2[2] - box2[0]) * (box2[3] - box2[1])
        # epsilon avoids division by zero on degenerate boxes
        return inter / (area1 + area2 - inter + 1e-6)

    def update(self, detections):
        """Match detections to existing tracks; unmatched ones get new ids."""
        updated_tracks = []
        for det in detections:
            best_iou = 0
            best_track = None
            for track in self.tracks:
                iou_score = self.iou(det, track["box"])
                if iou_score > best_iou:
                    best_iou = iou_score
                    best_track = track
            if best_iou > 0.3:
                # good enough overlap: keep the id, refresh the box
                best_track["box"] = det
                updated_tracks.append(best_track)
            else:
                updated_tracks.append({"id": self.track_id, "box": det})
                self.track_id += 1
        # tracks with no matching detection this frame are dropped
        self.tracks = updated_tracks
        return self.tracks


# --------------------------------------------------------------------------
# utils.py -- drawing
# --------------------------------------------------------------------------
def draw(frame, tracks):
    """Draw each track's box and id onto the frame, returning the frame."""
    for t in tracks:
        x1, y1, x2, y2, score, cls = t["box"]
        tid = t["id"]
        cv2.rectangle(frame, (int(x1), int(y1)), (int(x2), int(y2)),
                      (0, 255, 0), 2)
        cv2.putText(frame, f"ID:{tid}", (int(x1), int(y1) - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
    return frame


# --------------------------------------------------------------------------
# main.py -- multi-threaded main program
# --------------------------------------------------------------------------
frame_queue = queue.Queue(maxsize=2)    # small queues keep latency low
result_queue = queue.Queue(maxsize=2)

detector = None
tracker = None


def capture():
    """Grab camera frames and feed the frame queue (drops when full)."""
    cap = cv2.VideoCapture(CAMERA_ID)
    cap.set(3, 640)   # CAP_PROP_FRAME_WIDTH
    cap.set(4, 480)   # CAP_PROP_FRAME_HEIGHT
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        if not frame_queue.full():
            frame_queue.put(frame)


def inference():
    """Detect every SKIP_FRAMES-th frame, reuse detections in between."""
    frame_id = 0
    last_det = []
    while True:
        if not frame_queue.empty():
            frame = frame_queue.get()
            frame_id += 1
            if frame_id % SKIP_FRAMES == 0:
                dets = detector.detect(frame)
                last_det = dets
            else:
                dets = last_det   # frame skipping: recycle last detections
            tracks = tracker.update(dets)
            if not result_queue.full():
                result_queue.put((frame, tracks))


def display():
    """Draw tracks + FPS and show the window; ESC exits."""
    fps = 0
    count = 0
    start = time.time()
    while True:
        if not result_queue.empty():
            frame, tracks = result_queue.get()
            frame = draw(frame, tracks)
            count += 1
            fps = count / (time.time() - start)
            cv2.putText(frame, f"FPS:{fps:.2f}", (10, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
            cv2.imshow("PP-YOLO Realtime", frame)
            if cv2.waitKey(1) == 27:   # ESC
                break


if __name__ == "__main__":
    detector = Detector()
    tracker = SimpleTracker()
    # daemon=True so the worker threads die with the main thread; otherwise
    # the process would hang forever after the display loop exits.
    threading.Thread(target=capture, daemon=True).start()
    threading.Thread(target=inference, daemon=True).start()
    display()


# --------------------------------------------------------------------------
# Performance-optimization summary for low-spec machines (from the article):
#   * resolution 640x480 or 320x240
#   * SKIP_FRAMES = 2-3
#   * ONNX Runtime + multi-threading
# Measured/expected FPS on a low-spec CPU per optimization stage:
#   baseline 3-5  ->  ONNX 8-12  ->  frame skipping 10-15  ->  threads 15-20
# This is about the practical limit for low-end hardware: lightweight
# PP-YOLOE + ONNX acceleration + threading for FPS + simplified tracking.
# --------------------------------------------------------------------------