# tests/test_python.py
  1. # Ultralytics YOLO 🚀, AGPL-3.0 license
  2. import shutil
  3. from copy import copy
  4. from pathlib import Path
  5. import cv2
  6. import numpy as np
  7. import pytest
  8. import torch
  9. from PIL import Image
  10. from torchvision.transforms import ToTensor
  11. from ultralytics import RTDETR, YOLO
  12. from ultralytics.data.build import load_inference_source
  13. from ultralytics.utils import ASSETS, DEFAULT_CFG, LINUX, ONLINE, ROOT, SETTINGS, WINDOWS
  14. from ultralytics.utils.downloads import download
  15. from ultralytics.utils.torch_utils import TORCH_1_9
# Paths and model/config constants shared by every test in this file.
WEIGHTS_DIR = Path(SETTINGS['weights_dir'])  # directory where weights are downloaded/stored
MODEL = WEIGHTS_DIR / 'path with spaces' / 'yolov8n.pt'  # test spaces in path
CFG = 'yolov8n.yaml'  # model architecture YAML, used for from-scratch construction
SOURCE = ASSETS / 'bus.jpg'  # default inference image
TMP = (ROOT / '../tests/tmp').resolve()  # temp directory for test files
  21. def test_model_forward():
  22. model = YOLO(CFG)
  23. model(source=None, imgsz=32, augment=True) # also test no source and augment
  24. def test_model_methods():
  25. model = YOLO(MODEL)
  26. # Model methods
  27. model.info(verbose=True, detailed=True)
  28. model = model.reset_weights()
  29. model = model.load(MODEL)
  30. model.to('cpu')
  31. model.fuse()
  32. # Model properties
  33. _ = model.names
  34. _ = model.device
  35. _ = model.transforms
  36. _ = model.task_map
  37. def test_predict_txt():
  38. # Write a list of sources (file, dir, glob, recursive glob) to a txt file
  39. txt_file = TMP / 'sources.txt'
  40. with open(txt_file, 'w') as f:
  41. for x in [ASSETS / 'bus.jpg', ASSETS, ASSETS / '*', ASSETS / '**/*.jpg']:
  42. f.write(f'{x}\n')
  43. model = YOLO(MODEL)
  44. model(source=txt_file, imgsz=32)
  45. def test_predict_img():
  46. model = YOLO(MODEL)
  47. seg_model = YOLO(WEIGHTS_DIR / 'yolov8n-seg.pt')
  48. cls_model = YOLO(WEIGHTS_DIR / 'yolov8n-cls.pt')
  49. pose_model = YOLO(WEIGHTS_DIR / 'yolov8n-pose.pt')
  50. im = cv2.imread(str(SOURCE))
  51. assert len(model(source=Image.open(SOURCE), save=True, verbose=True, imgsz=32)) == 1 # PIL
  52. assert len(model(source=im, save=True, save_txt=True, imgsz=32)) == 1 # ndarray
  53. assert len(model(source=[im, im], save=True, save_txt=True, imgsz=32)) == 2 # batch
  54. assert len(list(model(source=[im, im], save=True, stream=True, imgsz=32))) == 2 # stream
  55. assert len(model(torch.zeros(320, 640, 3).numpy(), imgsz=32)) == 1 # tensor to numpy
  56. batch = [
  57. str(SOURCE), # filename
  58. Path(SOURCE), # Path
  59. 'https://ultralytics.com/images/zidane.jpg' if ONLINE else SOURCE, # URI
  60. cv2.imread(str(SOURCE)), # OpenCV
  61. Image.open(SOURCE), # PIL
  62. np.zeros((320, 640, 3))] # numpy
  63. assert len(model(batch, imgsz=32)) == len(batch) # multiple sources in a batch
  64. # Test tensor inference
  65. im = cv2.imread(str(SOURCE)) # OpenCV
  66. t = cv2.resize(im, (32, 32))
  67. t = ToTensor()(t)
  68. t = torch.stack([t, t, t, t])
  69. results = model(t, imgsz=32)
  70. assert len(results) == t.shape[0]
  71. results = seg_model(t, imgsz=32)
  72. assert len(results) == t.shape[0]
  73. results = cls_model(t, imgsz=32)
  74. assert len(results) == t.shape[0]
  75. results = pose_model(t, imgsz=32)
  76. assert len(results) == t.shape[0]
  77. def test_predict_grey_and_4ch():
  78. # Convert SOURCE to greyscale and 4-ch
  79. im = Image.open(SOURCE)
  80. directory = TMP / 'im4'
  81. directory.mkdir(parents=True, exist_ok=True)
  82. source_greyscale = directory / 'greyscale.jpg'
  83. source_rgba = directory / '4ch.png'
  84. source_non_utf = directory / 'non_UTF_测试文件_tést_image.jpg'
  85. source_spaces = directory / 'image with spaces.jpg'
  86. im.convert('L').save(source_greyscale) # greyscale
  87. im.convert('RGBA').save(source_rgba) # 4-ch PNG with alpha
  88. im.save(source_non_utf) # non-UTF characters in filename
  89. im.save(source_spaces) # spaces in filename
  90. # Inference
  91. model = YOLO(MODEL)
  92. for f in source_rgba, source_greyscale, source_non_utf, source_spaces:
  93. for source in Image.open(f), cv2.imread(str(f)), f:
  94. results = model(source, save=True, verbose=True, imgsz=32)
  95. assert len(results) == 1 # verify that an image was run
  96. f.unlink() # cleanup
  97. @pytest.mark.skipif(not ONLINE, reason='environment is offline')
  98. def test_track_stream():
  99. # Test YouTube streaming inference (short 10 frame video) with non-default ByteTrack tracker
  100. # imgsz=160 required for tracking for higher confidence and better matches
  101. import yaml
  102. model = YOLO(MODEL)
  103. model.predict('https://youtu.be/G17sBkb38XQ', imgsz=96, save=True)
  104. model.track('https://ultralytics.com/assets/decelera_portrait_min.mov', imgsz=160, tracker='bytetrack.yaml')
  105. model.track('https://ultralytics.com/assets/decelera_portrait_min.mov', imgsz=160, tracker='botsort.yaml')
  106. # Test Global Motion Compensation (GMC) methods
  107. for gmc in 'orb', 'sift', 'ecc':
  108. with open(ROOT / 'cfg/trackers/botsort.yaml') as f:
  109. data = yaml.safe_load(f)
  110. tracker = TMP / f'botsort-{gmc}.yaml'
  111. data['gmc_method'] = gmc
  112. with open(tracker, 'w') as f:
  113. yaml.safe_dump(data, f)
  114. model.track('https://ultralytics.com/assets/decelera_portrait_min.mov', imgsz=160, tracker=tracker)
  115. def test_val():
  116. model = YOLO(MODEL)
  117. model.val(data='coco8.yaml', imgsz=32, save_hybrid=True)
  118. def test_train_scratch():
  119. model = YOLO(CFG)
  120. model.train(data='coco8.yaml', epochs=2, imgsz=32, cache='disk', batch=-1, close_mosaic=1, name='model')
  121. model(SOURCE)
  122. def test_train_pretrained():
  123. model = YOLO(WEIGHTS_DIR / 'yolov8n-seg.pt')
  124. model.train(data='coco8-seg.yaml', epochs=1, imgsz=32, cache='ram', copy_paste=0.5, mixup=0.5, name=0)
  125. model(SOURCE)
  126. def test_export_torchscript():
  127. model = YOLO(MODEL)
  128. f = model.export(format='torchscript', optimize=True)
  129. YOLO(f)(SOURCE) # exported model inference
  130. def test_export_onnx():
  131. model = YOLO(MODEL)
  132. f = model.export(format='onnx', dynamic=True)
  133. YOLO(f)(SOURCE) # exported model inference
  134. def test_export_openvino():
  135. model = YOLO(MODEL)
  136. f = model.export(format='openvino')
  137. YOLO(f)(SOURCE) # exported model inference
  138. def test_export_coreml():
  139. if not WINDOWS: # RuntimeError: BlobWriter not loaded with coremltools 7.0 on windows
  140. model = YOLO(MODEL)
  141. model.export(format='coreml', nms=True)
  142. # if MACOS:
  143. # YOLO(f)(SOURCE) # model prediction only supported on macOS
  144. def test_export_tflite(enabled=False):
  145. # TF suffers from install conflicts on Windows and macOS
  146. if enabled and LINUX:
  147. model = YOLO(MODEL)
  148. f = model.export(format='tflite')
  149. YOLO(f)(SOURCE)
  150. def test_export_pb(enabled=False):
  151. # TF suffers from install conflicts on Windows and macOS
  152. if enabled and LINUX:
  153. model = YOLO(MODEL)
  154. f = model.export(format='pb')
  155. YOLO(f)(SOURCE)
  156. def test_export_paddle(enabled=False):
  157. # Paddle protobuf requirements conflicting with onnx protobuf requirements
  158. if enabled:
  159. model = YOLO(MODEL)
  160. model.export(format='paddle')
  161. def test_export_ncnn():
  162. model = YOLO(MODEL)
  163. f = model.export(format='ncnn')
  164. YOLO(f)(SOURCE) # exported model inference
  165. def test_all_model_yamls():
  166. for m in (ROOT / 'cfg' / 'models').rglob('*.yaml'):
  167. if 'rtdetr' in m.name:
  168. if TORCH_1_9: # torch<=1.8 issue - TypeError: __init__() got an unexpected keyword argument 'batch_first'
  169. RTDETR(m.name)(SOURCE, imgsz=640)
  170. else:
  171. YOLO(m.name)
  172. def test_workflow():
  173. model = YOLO(MODEL)
  174. model.train(data='coco8.yaml', epochs=1, imgsz=32)
  175. model.val()
  176. model.predict(SOURCE)
  177. model.export(format='onnx') # export a model to ONNX format
  178. def test_predict_callback_and_setup():
  179. # Test callback addition for prediction
  180. def on_predict_batch_end(predictor): # results -> List[batch_size]
  181. path, im0s, _, _ = predictor.batch
  182. im0s = im0s if isinstance(im0s, list) else [im0s]
  183. bs = [predictor.dataset.bs for _ in range(len(path))]
  184. predictor.results = zip(predictor.results, im0s, bs)
  185. model = YOLO(MODEL)
  186. model.add_callback('on_predict_batch_end', on_predict_batch_end)
  187. dataset = load_inference_source(source=SOURCE)
  188. bs = dataset.bs # noqa access predictor properties
  189. results = model.predict(dataset, stream=True) # source already setup
  190. for r, im0, bs in results:
  191. print('test_callback', im0.shape)
  192. print('test_callback', bs)
  193. boxes = r.boxes # Boxes object for bbox outputs
  194. print(boxes)
  195. def test_results():
  196. for m in 'yolov8n-pose.pt', 'yolov8n-seg.pt', 'yolov8n.pt', 'yolov8n-cls.pt':
  197. model = YOLO(m)
  198. results = model([SOURCE, SOURCE])
  199. for r in results:
  200. r = r.cpu().numpy()
  201. r = r.to(device='cpu', dtype=torch.float32)
  202. r.save_txt(txt_file='runs/tests/label.txt', save_conf=True)
  203. r.save_crop(save_dir='runs/tests/crops/')
  204. r.tojson(normalize=True)
  205. r.plot(pil=True)
  206. r.plot(conf=True, boxes=True)
  207. print(r)
  208. print(r.path)
  209. for k in r.keys:
  210. print(getattr(r, k))
  211. @pytest.mark.skipif(not ONLINE, reason='environment is offline')
  212. def test_data_utils():
  213. # Test functions in ultralytics/data/utils.py
  214. from ultralytics.data.utils import HUBDatasetStats, autosplit
  215. from ultralytics.utils.downloads import zip_directory
  216. # from ultralytics.utils.files import WorkingDirectory
  217. # with WorkingDirectory(ROOT.parent / 'tests'):
  218. download('https://github.com/ultralytics/hub/raw/main/example_datasets/coco8.zip', unzip=False)
  219. shutil.move('coco8.zip', TMP)
  220. stats = HUBDatasetStats(TMP / 'coco8.zip', task='detect')
  221. stats.get_json(save=True)
  222. stats.process_images()
  223. autosplit(TMP / 'coco8')
  224. zip_directory(TMP / 'coco8/images/val') # zip
  225. @pytest.mark.skipif(not ONLINE, reason='environment is offline')
  226. def test_data_converter():
  227. # Test dataset converters
  228. from ultralytics.data.converter import coco80_to_coco91_class, convert_coco
  229. file = 'instances_val2017.json'
  230. download(f'https://github.com/ultralytics/yolov5/releases/download/v1.0/{file}')
  231. shutil.move(file, TMP)
  232. convert_coco(labels_dir=TMP, use_segments=True, use_keypoints=False, cls91to80=True)
  233. coco80_to_coco91_class()
  234. def test_data_annotator():
  235. from ultralytics.data.annotator import auto_annotate
  236. auto_annotate(ASSETS, det_model='yolov8n.pt', sam_model='mobile_sam.pt', output_dir=TMP / 'auto_annotate_labels')
  237. def test_events():
  238. # Test event sending
  239. from ultralytics.hub.utils import Events
  240. events = Events()
  241. events.enabled = True
  242. cfg = copy(DEFAULT_CFG) # does not require deepcopy
  243. cfg.mode = 'test'
  244. events(cfg)
  245. def test_utils_init():
  246. from ultralytics.utils import get_git_branch, get_git_origin_url, get_ubuntu_version, is_github_actions_ci
  247. get_ubuntu_version()
  248. is_github_actions_ci()
  249. get_git_origin_url()
  250. get_git_branch()
  251. def test_utils_checks():
  252. from ultralytics.utils.checks import (check_imgsz, check_imshow, check_requirements, check_yolov5u_filename,
  253. git_describe, print_args)
  254. check_yolov5u_filename('yolov5n.pt')
  255. # check_imshow(warn=True)
  256. git_describe(ROOT)
  257. check_requirements() # check requirements.txt
  258. check_imgsz([600, 600], max_dim=1)
  259. check_imshow()
  260. print_args()
  261. def test_utils_benchmarks():
  262. from ultralytics.utils.benchmarks import ProfileModels
  263. ProfileModels(['yolov8n.yaml'], imgsz=32, min_time=1, num_timed_runs=3, num_warmup_runs=1).profile()
  264. def test_utils_torchutils():
  265. from ultralytics.nn.modules.conv import Conv
  266. from ultralytics.utils.torch_utils import get_flops_with_torch_profiler, profile, time_sync
  267. x = torch.randn(1, 64, 20, 20)
  268. m = Conv(64, 64, k=1, s=2)
  269. profile(x, [m], n=3)
  270. get_flops_with_torch_profiler(m)
  271. time_sync()
  272. def test_utils_downloads():
  273. from ultralytics.utils.downloads import get_google_drive_file_info
  274. get_google_drive_file_info('https://drive.google.com/file/d/1cqT-cJgANNrhIHCrEufUYhQ4RqiWG_lJ/view?usp=drive_link')
  275. def test_utils_ops():
  276. from ultralytics.utils.ops import (ltwh2xywh, ltwh2xyxy, make_divisible, xywh2ltwh, xywh2xyxy, xywhn2xyxy,
  277. xywhr2xyxyxyxy, xyxy2ltwh, xyxy2xywh, xyxy2xywhn, xyxyxyxy2xywhr)
  278. make_divisible(17, torch.tensor([8]))
  279. boxes = torch.rand(10, 4) # xywh
  280. torch.allclose(boxes, xyxy2xywh(xywh2xyxy(boxes)))
  281. torch.allclose(boxes, xyxy2xywhn(xywhn2xyxy(boxes)))
  282. torch.allclose(boxes, ltwh2xywh(xywh2ltwh(boxes)))
  283. torch.allclose(boxes, xyxy2ltwh(ltwh2xyxy(boxes)))
  284. boxes = torch.rand(10, 5) # xywhr for OBB
  285. boxes[:, 4] = torch.randn(10) * 30
  286. torch.allclose(boxes, xyxyxyxy2xywhr(xywhr2xyxyxyxy(boxes)), rtol=1e-3)
  287. def test_utils_files():
  288. from ultralytics.utils.files import file_age, file_date, get_latest_run, spaces_in_path
  289. file_age(SOURCE)
  290. file_date(SOURCE)
  291. get_latest_run(ROOT / 'runs')
  292. path = TMP / 'path/with spaces'
  293. path.mkdir(parents=True, exist_ok=True)
  294. with spaces_in_path(path) as new_path:
  295. print(new_path)
  296. def test_nn_modules_conv():
  297. from ultralytics.nn.modules.conv import CBAM, Conv2, ConvTranspose, DWConvTranspose2d, Focus
  298. c1, c2 = 8, 16 # input and output channels
  299. x = torch.zeros(4, c1, 10, 10) # BCHW
  300. # Run all modules not otherwise covered in tests
  301. DWConvTranspose2d(c1, c2)(x)
  302. ConvTranspose(c1, c2)(x)
  303. Focus(c1, c2)(x)
  304. CBAM(c1)(x)
  305. # Fuse ops
  306. m = Conv2(c1, c2)
  307. m.fuse_convs()
  308. m(x)
  309. def test_nn_modules_block():
  310. from ultralytics.nn.modules.block import C1, C3TR, BottleneckCSP, C3Ghost, C3x
  311. c1, c2 = 8, 16 # input and output channels
  312. x = torch.zeros(4, c1, 10, 10) # BCHW
  313. # Run all modules not otherwise covered in tests
  314. C1(c1, c2)(x)
  315. C3x(c1, c2)(x)
  316. C3TR(c1, c2)(x)
  317. C3Ghost(c1, c2)(x)
  318. BottleneckCSP(c1, c2)(x)
  319. def test_hub():
  320. from ultralytics.hub import export_fmts_hub, logout
  321. export_fmts_hub()
  322. logout()