test_cli.py

# Ultralytics YOLO 🚀, AGPL-3.0 license

import subprocess
from pathlib import Path

import pytest

from ultralytics.utils import ASSETS, SETTINGS

WEIGHTS_DIR = Path(SETTINGS['weights_dir'])

TASK_ARGS = [
    ('detect', 'yolov8n', 'coco8.yaml'),
    ('segment', 'yolov8n-seg', 'coco8-seg.yaml'),
    ('classify', 'yolov8n-cls', 'imagenet10'),
    ('pose', 'yolov8n-pose', 'coco8-pose.yaml'), ]  # (task, model, data)

EXPORT_ARGS = [
    ('yolov8n', 'torchscript'),
    ('yolov8n-seg', 'torchscript'),
    ('yolov8n-cls', 'torchscript'),
    ('yolov8n-pose', 'torchscript'), ]  # (model, format)


def run(cmd):
    # Run a subprocess command with check=True
    subprocess.run(cmd.split(), check=True)
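

# Illustrative variant (assumption, not used by the tests below): the same call with
# captured text output, useful if a test wanted to assert on CLI stdout/stderr.
# Uses only standard subprocess.run() arguments.
def run_capture(cmd):
    # Hypothetical helper; returns the CompletedProcess with captured text output
    return subprocess.run(cmd.split(), check=True, capture_output=True, text=True)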


def test_special_modes():
    run('yolo help')
    run('yolo checks')
    run('yolo version')
    run('yolo settings reset')
    run('yolo cfg')
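    # Illustrative Python-API equivalent of `yolo checks` (assumption, not exercised here):
    # import ultralytics
    # ultralytics.checks()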


@pytest.mark.parametrize('task,model,data', TASK_ARGS)
def test_train(task, model, data):
    run(f'yolo train {task} model={model}.yaml data={data} imgsz=32 epochs=1 cache=disk')


@pytest.mark.parametrize('task,model,data', TASK_ARGS)
def test_val(task, model, data):
    # Download annotations to run pycocotools eval
    # from ultralytics.utils import SETTINGS, Path
    # from ultralytics.utils.downloads import download
    # url = 'https://github.com/ultralytics/assets/releases/download/v0.0.0/'
    # download(f'{url}instances_val2017.json', dir=Path(SETTINGS['datasets_dir']) / 'coco8/annotations')
    # download(f'{url}person_keypoints_val2017.json', dir=Path(SETTINGS['datasets_dir']) / 'coco8-pose/annotations')

    # Validate
    run(f'yolo val {task} model={WEIGHTS_DIR / model}.pt data={data} imgsz=32 save_txt save_json')


@pytest.mark.parametrize('task,model,data', TASK_ARGS)
def test_predict(task, model, data):
    run(f'yolo predict model={WEIGHTS_DIR / model}.pt source={ASSETS} imgsz=32 save save_crop save_txt')


@pytest.mark.parametrize('model,format', EXPORT_ARGS)
def test_export(model, format):
    run(f'yolo export model={WEIGHTS_DIR / model}.pt format={format} imgsz=32')
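    # Illustrative follow-up (assumption, not executed by this test): the exported
    # TorchScript file can be reloaded for inference through the Python API, e.g.
    # from ultralytics import YOLO
    # YOLO(WEIGHTS_DIR / f'{model}.torchscript')(ASSETS / 'bus.jpg', imgsz=32)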


def test_rtdetr(task='detect', model='yolov8n-rtdetr.yaml', data='coco8.yaml'):
    # Warning: MUST use imgsz=640
    run(f'yolo train {task} model={model} data={data} imgsz=640 epochs=1, cache = disk')  # comma and spaces added intentionally to test argument parsing
    run(f"yolo predict {task} model={model} source={ASSETS / 'bus.jpg'} imgsz=640 save save_crop save_txt")


def test_fastsam(task='segment', model=WEIGHTS_DIR / 'FastSAM-s.pt', data='coco8-seg.yaml'):
    source = ASSETS / 'bus.jpg'

    run(f'yolo segment val {task} model={model} data={data} imgsz=32')
    run(f'yolo segment predict model={model} source={source} imgsz=32 save save_crop save_txt')

    from ultralytics import FastSAM
    from ultralytics.models.fastsam import FastSAMPrompt

    # Create a FastSAM model
    sam_model = FastSAM(model)  # or FastSAM-x.pt

    # Run inference on an image
    everything_results = sam_model(source, device='cpu', retina_masks=True, imgsz=1024, conf=0.4, iou=0.9)

    # Everything prompt
    prompt_process = FastSAMPrompt(source, everything_results, device='cpu')
    ann = prompt_process.everything_prompt()

    # Bbox default shape [0,0,0,0] -> [x1,y1,x2,y2]
    ann = prompt_process.box_prompt(bbox=[200, 200, 300, 300])

    # Text prompt
    ann = prompt_process.text_prompt(text='a photo of a dog')

    # Point prompt
    # points default [[0,0]] [[x1,y1],[x2,y2]]
    # point_label default [0] [1,0] 0:background, 1:foreground
    ann = prompt_process.point_prompt(points=[[200, 200]], pointlabel=[1])
    prompt_process.plot(annotations=ann, output='./')


def test_mobilesam():
    from ultralytics import SAM

    # Load the model
    model = SAM(WEIGHTS_DIR / 'mobile_sam.pt')

    # Source
    source = ASSETS / 'zidane.jpg'

    # Predict a segment based on a point prompt
    model.predict(source, points=[900, 370], labels=[1])

    # Predict a segment based on a box prompt
    model.predict(source, bboxes=[439, 437, 524, 709])

    # Predict all
    # model(source)
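    # Illustrative only (assumption, not asserted by this test): predict() returns a
    # list of Results objects, so segmentation masks could be inspected like this:
    # results = model.predict(source, points=[900, 370], labels=[1])
    # if results and results[0].masks is not None:
    #     print(results[0].masks.data.shape)  # mask tensor of shape (N, H, W)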


# Slow Tests
@pytest.mark.slow
@pytest.mark.parametrize('task,model,data', TASK_ARGS)
def test_train_gpu(task, model, data):
    run(f'yolo train {task} model={model}.yaml data={data} imgsz=32 epochs=1 device="0"')  # single GPU
    run(f'yolo train {task} model={model}.pt data={data} imgsz=32 epochs=1 device="0,1"')  # multi GPU