import argparse
import gc
import threading

import cv2
import imutils

from AIDetector_pytorch import Detector


def start_camera_detector(camera_id, width, height, detector):
    """Capture frames from one camera and run detection on every 3rd frame.

    Opens V4L2 device `camera_id`, shows detection results in a window named
    'Demo Camera {camera_id}', and runs until the stream ends or the user
    presses 'q' in that window. Intended to run in its own thread (see main()).

    Args:
        camera_id: V4L2 device index to open (CAP_V4L2 backend: Linux-only).
        width: Requested capture width in pixels.
        height: Requested capture height in pixels.
        detector: Detector instance; feedCap() is called on sampled frames and
            must return a dict with a 'frame' key. NOTE(review): one instance
            is shared by all camera threads without a lock — assumes
            Detector.feedCap is thread-safe; confirm.

    NOTE(review): cv2.imshow/waitKey are called from worker threads; OpenCV
    HighGUI is not guaranteed thread-safe off the main thread — confirm this
    works on the target platform/backend.
    """
    name = 'Demo Camera {}'.format(camera_id)
    cap = cv2.VideoCapture(camera_id, cv2.CAP_V4L2)
    if not cap.isOpened():
        print('Error: Unable to open camera {}.'.format(camera_id))
        return
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    fps = cap.get(cv2.CAP_PROP_FPS)
    if fps <= 0:
        fps = 30  # some drivers report 0 or -1; fall back to a sane default
    frame_delay_ms = int(1000 / fps)
    print('{} fps:'.format(name), fps)

    frame_count = 0
    try:
        while True:
            ret, im = cap.read()
            if not ret or im is None:
                break  # end of stream or device error
            # Run the detector on every 3rd frame only, to cut inference load.
            if frame_count % 3 == 0:
                result = detector.feedCap(im)['frame']
                result = imutils.resize(result, height=500)
                cv2.imshow(name, result)
                key_wait = frame_delay_ms  # pace display to the source fps
            else:
                key_wait = 1  # short poll so skipped frames still pump events
            # BUG FIX: waitKey was previously called only on processed frames,
            # so on the two skipped frames the GUI queue was never pumped and
            # a 'q' keypress could be missed. Poll on every iteration.
            if cv2.waitKey(key_wait) & 0xFF == ord('q'):
                break
            frame_count += 1
            if frame_count % 30 == 0:
                gc.collect()  # periodic collection to limit memory growth
    finally:
        # Release the device and window even if feedCap/imshow raises.
        cap.release()
        cv2.destroyWindow(name)


def main():
    """Parse CLI options and launch one detection thread per camera.

    Blocks until every camera thread has finished (stream ended or the user
    pressed 'q' in each window).
    """
    parser = argparse.ArgumentParser(description='Camera Detection with ONNX.')
    parser.add_argument('--camera_count', type=int, default=6,
                        help='Number of cameras to use.')
    parser.add_argument('--width', type=int, default=1280,
                        help='Input the width of the video image(default=1280)')
    parser.add_argument('--height', type=int, default=720,
                        help='Input the height of the video image(default=720)')
    args = parser.parse_args()

    # Single detector instance shared by all camera threads.
    detector = Detector()

    threads = []
    for cam_id in range(args.camera_count):
        thread = threading.Thread(
            target=start_camera_detector,
            args=(cam_id, args.width, args.height, detector),
        )
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()


if __name__ == "__main__":
    main()