
Usage, test results, and outstanding issues

Curious, 7 months ago
commit 55ee0a6ff1

+ 43 - 0
README.md

@@ -1,2 +1,45 @@
 ## Vehicle-side demo development
 
+### Launch scripts:
+     demo.py: multi-camera detection demo (temporarily incomplete due to insufficient compute resources)
+     demo1.py: single-camera detection demo
+
+### Detection file:
+     AIDetector_pytorch.py
+
+### Tracking/update file:
+     tracker.py
+     The final detection + tracking-ID results are available in tracker.py, in def plot_bboxes
+     In def plot_bboxes, cls_id is the class ID and pos_id is the tracker ID
+
+### How to run:
+     demo1.py:
+     python3.8 demo1.py --camera_id 1 --width 1280 --height 720
+     The three arguments default to 1, 1280, and 720 respectively
+     demo.py:
+     python demo.py --camera_count 6 --width 1280 --height 720
+     The three arguments default to 6, 1280, and 720 respectively
+
+### requirements
+    Install under python==3.8
+    pip install -r requirements    (note: in the successful test, the packages were installed one at a time)
+    The following must be downloaded manually:
+    matplotlib-3.5.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64
+    onnxruntime_gpu-1.10.0-cp38-cp38-linux_aarch64
+
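Since the packages were installed one at a time, it can help to confirm afterwards what actually landed in the python==3.8 environment. A small check sketch; the package list below is an assumption inferred from the imports in this commit, not from the requirements file itself:

```python
# Print the installed versions of packages the demos appear to rely on.
# Assumption: this list is inferred from the imports in this commit.
from importlib.metadata import version, PackageNotFoundError

for pkg in ('matplotlib', 'onnxruntime-gpu', 'torch', 'opencv-python', 'imutils'):
    try:
        print(pkg, version(pkg))
    except PackageNotFoundError:
        print(pkg, 'not installed')
```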
+### CPU performance figures
+    With no person in frame, each image takes about 0.16 s to process
+    With a person in frame, each image takes about 0.3 s to process
+    See detection_log.txt for the reference results
+
+### Unresolved issues
+    CUDA cannot be invoked, so GPU testing could not be completed
+    Under the requirements environment the error is:
+    [E:onnxruntime:Default, provider_bridge_ort.cc:995 Get] Failed to load library libonnxruntime_providers_cuda.so with error: libcublas.so.10: cannot open shared object file: No such file or directory
+    [W:onnxruntime:Default, onnxruntime_pybind_state.cc:535 CreateExecutionProviderInstance] Failed to create CUDAExecutionProvider. Please reference https://onnxruntime.ai/docs/reference/execution-providers/CUDA-ExecutionProvider.html#requirements to ensure all dependencies are met.
+    Solutions tried so far:
+    1. Adding cuDNN
+    2. Checking the onnx, cuda, and cuDNN versions (the version pairing is correct)
+
+
+
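To narrow down the libcublas.so.10 failure described above, it can help to check, outside the detector, which providers onnxruntime reports and whether the missing library resolves at all. A minimal diagnostic sketch, assuming onnxruntime-gpu 1.10.0; the library name is taken straight from the error message:

```python
import ctypes
import onnxruntime as ort

print('onnxruntime:', ort.__version__)
print('available providers:', ort.get_available_providers())

# Try to load the exact library named in the error message. If this raises
# OSError, CUDA 10's cuBLAS is missing from the loader path, and the fix is
# on the system side (CUDA install / LD_LIBRARY_PATH), not in onnxruntime.
try:
    ctypes.CDLL('libcublas.so.10')
    print('libcublas.so.10 loaded OK')
except OSError as exc:
    print('libcublas.so.10 failed to load:', exc)
```

onnxruntime-gpu wheels are built against one specific CUDA/cuDNN pair, so the libcublas.so.10 name in the error indicates which CUDA major version this particular wheel expects to find.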

+ 2 - 1
test/test-yolov5-deepsort/AIDetector_pytorch.py

@@ -24,7 +24,8 @@ class Detector(baseDet):
     def init_model(self):
         self.weights = 'weights/yolov5s.onnx'
         self.device = '0' if torch.cuda.is_available() else 'cpu'
-        self.session = ort.InferenceSession(self.weights, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+        #self.session = ort.InferenceSession(self.weights, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+        self.session = ort.InferenceSession(self.weights, providers=['CUDAExecutionProvider'])
         self.names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                       'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                       'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
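The change above pins the session to CUDAExecutionProvider alone, which surfaces the provider failure immediately instead of masking it behind a silent CPU fallback. If a fallback is wanted instead, a hedged sketch (not part of this commit) that only requests CUDA when onnxruntime reports it as available:

```python
import onnxruntime as ort

def make_session(weights):
    """Request CUDA only when onnxruntime reports it; otherwise run on CPU.

    Caveat: get_available_providers() lists compiled-in providers, so CUDA
    can still fail at session creation if its shared libraries are missing.
    """
    if 'CUDAExecutionProvider' in ort.get_available_providers():
        providers = ['CUDAExecutionProvider', 'CPUExecutionProvider']
    else:
        providers = ['CPUExecutionProvider']
    return ort.InferenceSession(weights, providers=providers)
```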

+ 15 - 9
test/test-yolov5-deepsort/demo.py

@@ -2,23 +2,24 @@ import gc
 import cv2
 import imutils
 import threading
+import argparse  # parse command-line arguments
 from AIDetector_pytorch import Detector
 
-
-def start_camera_detector(camera_id, detector):
+def start_camera_detector(camera_id, width, height, detector):
     name = 'Demo Camera {}'.format(camera_id)
     cap = cv2.VideoCapture(camera_id, cv2.CAP_V4L2)
     if not cap.isOpened():
         print('Error: Unable to open camera {}.'.format(camera_id))
         return
-    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
-    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 640)
+    
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
 
     fps = cap.get(cv2.CAP_PROP_FPS)
     if fps <= 0:
         fps = 30
     t = int(1000 / fps)
-    print(f'{name} fps:', fps)
+    print('{} fps:'.format(name), fps)
 
     frame_count = 0
 
@@ -44,19 +45,24 @@ def start_camera_detector(camera_id, detector):
     cap.release()
     cv2.destroyWindow(name)
 
-
 def main():
+    parser = argparse.ArgumentParser(description='Camera Detection with ONNX.')
+    parser.add_argument('--camera_count', type=int, default=6, help='Number of cameras to use.')
+    parser.add_argument('--width', type=int, default=1280, help='Width of the video image (default=1280)')
+    parser.add_argument('--height', type=int, default=720, help='Height of the video image (default=720)')
+    
+    args = parser.parse_args()
+
     detector = Detector()
 
     threads = []
-    for i in range(6): # number of cameras
-        thread = threading.Thread(target=start_camera_detector, args=(i, detector))
+    for i in range(args.camera_count):
+        thread = threading.Thread(target=start_camera_detector, args=(i, args.width, args.height, detector))
         thread.start()
         threads.append(thread)
 
     for thread in threads:
         thread.join()
 
-
 if __name__ == "__main__":
     main()
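One caveat in demo.py: all camera threads share a single Detector, and DeepSort keeps mutable tracking state between feedCap calls. Nothing in the diff establishes that feedCap is thread-safe, so a cautious sketch (detect_frame and detector_lock are hypothetical names, not part of this commit) would serialize inference:

```python
import threading

detector_lock = threading.Lock()  # one lock shared by all camera threads

def detect_frame(detector, frame):
    # Serialize calls into the shared Detector: DeepSort updates internal
    # track state on every call, so unsynchronized calls from several
    # camera threads could interleave those updates.
    with detector_lock:
        return detector.feedCap(frame)
```

A shared tracker also shares track IDs across cameras; if memory permits, one Detector per camera may be the cleaner design.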

+ 17 - 11
test/test-yolov5-deepsort/demo1.py

@@ -3,26 +3,26 @@ import gc
 import cv2
 import imutils
 import threading
+import time  # time the per-frame processing
 from AIDetector_pytorch import Detector
-
+import logging
+import argparse  # parse command-line arguments
 
 # Event used to signal the detection thread to exit
 stop_event = threading.Event()
 
-def start_camera_detector(camera_id, detector, output_directory):
+def start_camera_detector(camera_id, width, height, detector, output_directory):
     name = 'Demo Camera {}'.format(camera_id)
-    #cap = cv2.VideoCapture(camera_id)
     cap = cv2.VideoCapture(camera_id, cv2.CAP_V4L2)
     if not cap.isOpened():
         print('Error: Unable to open camera {}.'.format(camera_id))
         return
-    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
-    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
+    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
+    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
 
     fps = cap.get(cv2.CAP_PROP_FPS)
     if fps <= 0:
         fps = 30
-    t = int(1000 / fps)
     print(f'{name} fps:', fps)
 
     frame_count = 0
@@ -33,14 +33,17 @@ def start_camera_detector(camera_id, detector, output_directory):
             break
 
         if frame_count % 3 == 0:
+            start_time = time.time()
             result = detector.feedCap(im)
             result_frame = result['frame']
+            elapsed_time = time.time() - start_time
             result_frame = imutils.resize(result_frame, height=500)
 
             # Save the result image
             filename = os.path.join(output_directory, f'camera_{camera_id}_frame_{frame_count}.jpg')
             cv2.imwrite(filename, result_frame)
             print(f'Saved image: {filename}')
+            logging.info(f'Frame {frame_count} processed in {elapsed_time:.4f} seconds.')
 
         frame_count += 1
         if frame_count % 30 == 0:
@@ -48,17 +51,21 @@ def start_camera_detector(camera_id, detector, output_directory):
 
     cap.release()
 
-
 def main():
+    parser = argparse.ArgumentParser(description='Camera Detection with ONNX.')
+    parser.add_argument('--camera_id', type=int, default=1, help='Camera ID to use.')
+    parser.add_argument('--width', type=int, default=1280, help='Width of the video image (default=1280)')
+    parser.add_argument('--height', type=int, default=720, help='Height of the video image (default=720)')
+    
+    args = parser.parse_args()
+
     detector = Detector()
 
     output_directory = 'output_images'
     os.makedirs(output_directory, exist_ok=True)  # Create output directory if not exists
 
     threads = []
-    #for i in range(1):  # number of cameras
-    i=1 
-    thread = threading.Thread(target=start_camera_detector, args=(i, detector, output_directory))
+    thread = threading.Thread(target=start_camera_detector, args=(args.camera_id, args.width, args.height, detector, output_directory))
     thread.start()
     threads.append(thread)
 
@@ -72,6 +79,5 @@ def main():
     for thread in threads:
         thread.join()
 
-
 if __name__ == "__main__":
     main()
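demo1.py creates stop_event at module level, but the hunks above don't show where the event is checked. The intended pattern is presumably along these lines (a sketch of the idiom, not code from the commit):

```python
import threading

stop_event = threading.Event()

def capture_loop(cap, detector):
    # Check the event each iteration so the main thread can stop us cleanly.
    while not stop_event.is_set():
        ret, frame = cap.read()
        if not ret:
            break
        detector.feedCap(frame)
    cap.release()

# In main(), calling stop_event.set() (e.g. on KeyboardInterrupt) lets every
# capture thread finish its current frame and exit before join() returns.
```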

+ 107 - 0
test/test-yolov5-deepsort/detection_log.txt

@@ -0,0 +1,107 @@
+2024-08-01 15:01:05,259 - Loading weights from deep_sort/deep_sort/deep/checkpoint/ckpt.t7... Done!
+2024-08-01 15:01:05,746 - Frame 0 processed in 0.2474 seconds.
+2024-08-01 15:01:06,028 - Frame 3 processed in 0.2431 seconds.
+2024-08-01 15:01:06,275 - Detected: person ID-1
+2024-08-01 15:01:06,288 - Frame 6 processed in 0.2212 seconds.
+2024-08-01 15:01:06,718 - Detected: person ID-1
+2024-08-01 15:01:06,732 - Frame 9 processed in 0.4047 seconds.
+2024-08-01 15:01:07,037 - Detected: person ID-1
+2024-08-01 15:01:07,050 - Frame 12 processed in 0.2800 seconds.
+2024-08-01 15:01:07,335 - Detected: person ID-1
+2024-08-01 15:01:07,349 - Frame 15 processed in 0.2596 seconds.
+2024-08-01 15:01:07,614 - Detected: person ID-1
+2024-08-01 15:01:07,627 - Frame 18 processed in 0.2396 seconds.
+2024-08-01 15:01:07,961 - Detected: person ID-1
+2024-08-01 15:01:07,974 - Frame 21 processed in 0.3080 seconds.
+2024-08-01 15:01:08,319 - Detected: person ID-1
+2024-08-01 15:01:08,333 - Frame 24 processed in 0.3196 seconds.
+2024-08-01 15:01:08,606 - Detected: person ID-1
+2024-08-01 15:01:08,619 - Frame 27 processed in 0.2476 seconds.
+2024-08-01 15:01:09,047 - Detected: person ID-1
+2024-08-01 15:01:09,060 - Frame 30 processed in 0.3247 seconds.
+2024-08-01 15:01:09,344 - Detected: person ID-1
+2024-08-01 15:01:09,358 - Frame 33 processed in 0.2583 seconds.
+2024-08-01 15:01:09,603 - Detected: person ID-1
+2024-08-01 15:01:09,617 - Frame 36 processed in 0.2204 seconds.
+2024-08-01 15:01:09,872 - Detected: person ID-1
+2024-08-01 15:01:09,886 - Frame 39 processed in 0.2302 seconds.
+2024-08-01 15:01:10,126 - Detected: person ID-1
+2024-08-01 15:01:10,139 - Frame 42 processed in 0.2145 seconds.
+2024-08-01 15:01:10,416 - Detected: person ID-1
+2024-08-01 15:01:10,430 - Frame 45 processed in 0.2489 seconds.
+2024-08-01 15:01:10,673 - Detected: person ID-1
+2024-08-01 15:01:10,687 - Frame 48 processed in 0.2183 seconds.
+2024-08-01 15:01:10,992 - Detected: person ID-1
+2024-08-01 15:01:11,005 - Frame 51 processed in 0.2797 seconds.
+2024-08-01 15:01:11,348 - Detected: person ID-1
+2024-08-01 15:01:11,362 - Frame 54 processed in 0.3178 seconds.
+2024-08-01 15:01:11,687 - Detected: person ID-1
+2024-08-01 15:01:11,700 - Frame 57 processed in 0.2978 seconds.
+2024-08-01 15:01:12,043 - Detected: person ID-1
+2024-08-01 15:01:12,057 - Frame 60 processed in 0.2399 seconds.
+2024-08-01 15:01:12,357 - Detected: person ID-1
+2024-08-01 15:01:12,371 - Frame 63 processed in 0.2756 seconds.
+2024-08-01 15:01:12,671 - Detected: person ID-1
+2024-08-01 15:01:12,685 - Frame 66 processed in 0.2748 seconds.
+2024-08-01 15:01:12,980 - Detected: person ID-1
+2024-08-01 15:01:12,981 - Detected: person ID-3
+2024-08-01 15:01:12,994 - Frame 69 processed in 0.2704 seconds.
+2024-08-01 15:01:13,169 - Detected: person ID-3
+2024-08-01 15:01:13,182 - Frame 72 processed in 0.1491 seconds.
+2024-08-01 15:01:13,484 - Detected: person ID-3
+2024-08-01 15:01:13,497 - Frame 75 processed in 0.2761 seconds.
+2024-08-01 15:01:13,795 - Detected: person ID-3
+2024-08-01 15:01:13,809 - Frame 78 processed in 0.2730 seconds.
+2024-08-01 15:01:14,110 - Detected: person ID-3
+2024-08-01 15:01:14,110 - Detected: person ID-5
+2024-08-01 15:01:14,124 - Frame 81 processed in 0.2760 seconds.
+2024-08-01 15:01:14,382 - Detected: person ID-3
+2024-08-01 15:01:14,383 - Detected: person ID-5
+2024-08-01 15:01:14,396 - Frame 84 processed in 0.2337 seconds.
+2024-08-01 15:01:14,692 - Detected: person ID-5
+2024-08-01 15:01:14,706 - Frame 87 processed in 0.2709 seconds.
+2024-08-01 15:01:15,045 - Detected: person ID-5
+2024-08-01 15:01:15,058 - Frame 90 processed in 0.2352 seconds.
+2024-08-01 15:01:15,360 - Detected: person ID-5
+2024-08-01 15:01:15,373 - Frame 93 processed in 0.2762 seconds.
+2024-08-01 15:01:15,667 - Detected: person ID-1
+2024-08-01 15:01:15,667 - Detected: person ID-5
+2024-08-01 15:01:15,681 - Frame 96 processed in 0.2685 seconds.
+2024-08-01 15:01:15,978 - Detected: person ID-1
+2024-08-01 15:01:15,979 - Detected: person ID-5
+2024-08-01 15:01:15,992 - Frame 99 processed in 0.2727 seconds.
+2024-08-01 15:01:16,291 - Detected: person ID-1
+2024-08-01 15:01:16,292 - Detected: person ID-5
+2024-08-01 15:01:16,305 - Frame 102 processed in 0.2743 seconds.
+2024-08-01 15:01:16,597 - Detected: person ID-1
+2024-08-01 15:01:16,597 - Detected: person ID-5
+2024-08-01 15:01:16,611 - Frame 105 processed in 0.2670 seconds.
+2024-08-01 15:01:16,907 - Detected: person ID-1
+2024-08-01 15:01:16,907 - Detected: person ID-5
+2024-08-01 15:01:16,921 - Frame 108 processed in 0.2708 seconds.
+2024-08-01 15:01:17,223 - Detected: person ID-5
+2024-08-01 15:01:17,237 - Frame 111 processed in 0.2776 seconds.
+2024-08-01 15:01:17,545 - Detected: person ID-5
+2024-08-01 15:01:17,546 - Detected: person ID-6
+2024-08-01 15:01:17,559 - Frame 114 processed in 0.2828 seconds.
+2024-08-01 15:01:17,864 - Detected: person ID-5
+2024-08-01 15:01:17,865 - Detected: person ID-6
+2024-08-01 15:01:17,878 - Frame 117 processed in 0.2803 seconds.
+2024-08-01 15:01:18,446 - Detected: person ID-5
+2024-08-01 15:01:18,447 - Detected: person ID-6
+2024-08-01 15:01:18,460 - Frame 120 processed in 0.4661 seconds.
+2024-08-01 15:01:18,759 - Detected: person ID-6
+2024-08-01 15:01:18,773 - Frame 123 processed in 0.2739 seconds.
+2024-08-01 15:01:19,088 - Detected: person ID-5
+2024-08-01 15:01:19,088 - Detected: person ID-6
+2024-08-01 15:01:19,101 - Frame 126 processed in 0.2901 seconds.
+2024-08-01 15:01:19,473 - Detected: person ID-5
+2024-08-01 15:01:19,486 - Frame 129 processed in 0.3462 seconds.
+2024-08-01 15:01:19,763 - Detected: person ID-5
+2024-08-01 15:01:19,777 - Frame 132 processed in 0.2517 seconds.
+2024-08-01 15:01:20,040 - Detected: person ID-5
+2024-08-01 15:01:20,054 - Frame 135 processed in 0.2378 seconds.
+2024-08-01 15:01:20,300 - Detected: person ID-5
+2024-08-01 15:01:20,314 - Frame 138 processed in 0.2211 seconds.
+2024-08-01 15:01:20,602 - Detected: person ID-5
+2024-08-01 15:01:20,616 - Frame 141 processed in 0.2631 seconds.
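The CPU figures quoted in the README (about 0.16 s per frame with no person, about 0.3 s with a person) can be recomputed from this log. A small parser sketch, assuming the "Frame N processed in X seconds." format shown above:

```python
import re

# Matches lines like: "... - Frame 42 processed in 0.2145 seconds."
PATTERN = re.compile(r'Frame \d+ processed in ([\d.]+) seconds')

times = []
with open('detection_log.txt') as fh:
    for line in fh:
        match = PATTERN.search(line)
        if match:
            times.append(float(match.group(1)))

if times:
    print(f'{len(times)} frames: mean {sum(times) / len(times):.4f} s, '
          f'min {min(times):.4f} s, max {max(times):.4f} s')
```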

+ 10 - 3
test/test-yolov5-deepsort/tracker.py

@@ -2,6 +2,10 @@ from deep_sort.utils.parser import get_config
 from deep_sort.deep_sort import DeepSort
 import torch
 import cv2
+import logging
+
+logging.basicConfig(filename='detection_log.txt', level=logging.INFO, 
+                    format='%(asctime)s - %(message)s')
 
 palette = (2 ** 11 - 1, 2 ** 15 - 1, 2 ** 20 - 1)
 cfg = get_config()
@@ -15,13 +19,16 @@ deepsort = DeepSort(cfg.DEEPSORT.REID_CKPT,
 
 def plot_bboxes(image, bboxes, line_thickness=None):
     # Plots one bounding box on image img
-    tl = line_thickness or round(
-        0.002 * (image.shape[0] + image.shape[1]) / 2) + 1  # line/font thickness
+    tl = line_thickness or round(0.002 * (image.shape[0] + image.shape[1]) / 2) + 1  # line/font thickness
+
     for (x1, y1, x2, y2, cls_id, pos_id) in bboxes:
-        if cls_id in ['person']:
+        if cls_id == 'person':
             color = (0, 0, 255)
+            # log the detection
+            logging.info(f'Detected: {cls_id} ID-{pos_id}')  # record the detected class and tracker ID
         else:
             color = (0, 255, 0)
+
         c1, c2 = (x1, y1), (x2, y2)
         cv2.rectangle(image, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA)
         tf = max(tl - 1, 1)  # font thickness
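As the README notes, each bbox tuple reaching plot_bboxes carries cls_id (the class ID) and pos_id (the tracker ID). A minimal usage sketch with synthetic boxes; note that importing tracker also constructs the DeepSort instance, so the config and ReID checkpoint it references must be present:

```python
import numpy as np
from tracker import plot_bboxes  # tracker.py from this commit

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
# Each tuple is (x1, y1, x2, y2, cls_id, pos_id).
bboxes = [(100, 100, 300, 400, 'person', 1),   # drawn in red, also logged
          (500, 200, 650, 420, 'car', 2)]      # drawn in green
plot_bboxes(frame, bboxes)  # cv2.rectangle draws on `frame` in place
# 'person' entries are appended to detection_log.txt via the logging config
# at the top of tracker.py.
```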