train.py

# Ultralytics YOLO 🚀, AGPL-3.0 license

from copy import copy

import numpy as np

from ultralytics.data import build_dataloader, build_yolo_dataset
from ultralytics.engine.trainer import BaseTrainer
from ultralytics.models import yolo
from ultralytics.nn.tasks import DetectionModel
from ultralytics.utils import LOGGER, RANK
from ultralytics.utils.plotting import plot_images, plot_labels, plot_results
from ultralytics.utils.torch_utils import de_parallel, torch_distributed_zero_first


class DetectionTrainer(BaseTrainer):
    """
    A class extending the BaseTrainer class for training based on a detection model.

    Example:
        ```python
        from ultralytics.models.yolo.detect import DetectionTrainer

        args = dict(model='yolov8n.pt', data='coco8.yaml', epochs=3)
        trainer = DetectionTrainer(overrides=args)
        trainer.train()
        ```
    """
    def build_dataset(self, img_path, mode='train', batch=None):
        """
        Build YOLO Dataset.

        Args:
            img_path (str): Path to the folder containing images.
            mode (str): `train` mode or `val` mode, users are able to customize different augmentations for each mode.
            batch (int, optional): Size of batches, this is for `rect`. Defaults to None.
        """
        gs = max(int(de_parallel(self.model).stride.max() if self.model else 0), 32)
        return build_yolo_dataset(self.args, img_path, batch, self.data, mode=mode, rect=mode == 'val', stride=gs)

    def get_dataloader(self, dataset_path, batch_size=16, rank=0, mode='train'):
        """Construct and return dataloader."""
        assert mode in ['train', 'val']
        with torch_distributed_zero_first(rank):  # init dataset *.cache only once if DDP
            dataset = self.build_dataset(dataset_path, mode, batch_size)
        shuffle = mode == 'train'
        if getattr(dataset, 'rect', False) and shuffle:
            LOGGER.warning("WARNING ⚠️ 'rect=True' is incompatible with DataLoader shuffle, setting shuffle=False")
            shuffle = False
        workers = self.args.workers if mode == 'train' else self.args.workers * 2
        return build_dataloader(dataset, batch_size, workers, shuffle, rank)  # return dataloader

    def preprocess_batch(self, batch):
        """Preprocesses a batch of images by scaling and converting to float."""
        batch['img'] = batch['img'].to(self.device, non_blocking=True).float() / 255
        return batch
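    # Illustrative effect (not part of the trainer): a uint8 image batch from the
    # dataloader, e.g. batch['img'] of shape (16, 3, 640, 640) with values in
    # [0, 255], comes back as a float32 tensor on self.device with values in [0, 1].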

    def set_model_attributes(self):
        """Set model attributes (number of classes, class names, hyperparameters) from the dataset."""
        # nl = de_parallel(self.model).model[-1].nl  # number of detection layers (to scale hyps)
        # self.args.box *= 3 / nl  # scale to layers
        # self.args.cls *= self.data["nc"] / 80 * 3 / nl  # scale to classes and layers
        # self.args.cls *= (self.args.imgsz / 640) ** 2 * 3 / nl  # scale to image size and layers
        self.model.nc = self.data['nc']  # attach number of classes to model
        self.model.names = self.data['names']  # attach class names to model
        self.model.args = self.args  # attach hyperparameters to model
        # TODO: self.model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc

    def get_model(self, cfg=None, weights=None, verbose=True):
        """Return a YOLO detection model."""
        model = DetectionModel(cfg, nc=self.data['nc'], verbose=verbose and RANK == -1)
        if weights:
            model.load(weights)
        return model

    def get_validator(self):
        """Returns a DetectionValidator for YOLO model validation."""
        self.loss_names = 'box_loss', 'cls_loss', 'dfl_loss'
        return yolo.detect.DetectionValidator(self.test_loader, save_dir=self.save_dir, args=copy(self.args))

    def label_loss_items(self, loss_items=None, prefix='train'):
        """
        Return a loss dict with labelled training loss items. Not needed for classification but necessary for
        segmentation and detection.
        """
        keys = [f'{prefix}/{x}' for x in self.loss_names]
        if loss_items is not None:
            loss_items = [round(float(x), 5) for x in loss_items]  # convert tensors to 5 decimal place floats
            return dict(zip(keys, loss_items))
        else:
            return keys
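    # Example: with prefix='train' and the default loss names, loss_items=[0.42, 0.31, 0.27]
    # (illustrative values) returns {'train/box_loss': 0.42, 'train/cls_loss': 0.31,
    # 'train/dfl_loss': 0.27}; loss_items=None returns just the list of keys.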

    def progress_string(self):
        """Returns a formatted string of training progress with epoch, GPU memory, loss, instances and size."""
        return ('\n' + '%11s' *
                (4 + len(self.loss_names))) % ('Epoch', 'GPU_mem', *self.loss_names, 'Instances', 'Size')
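    # With the default loss names, this renders a right-aligned column header like:
    #       Epoch    GPU_mem   box_loss   cls_loss   dfl_loss  Instances       Size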

    def plot_training_samples(self, batch, ni):
        """Plots training samples with their annotations."""
        plot_images(images=batch['img'],
                    batch_idx=batch['batch_idx'],
                    cls=batch['cls'].squeeze(-1),
                    bboxes=batch['bboxes'],
                    paths=batch['im_file'],
                    fname=self.save_dir / f'train_batch{ni}.jpg',
                    on_plot=self.on_plot)

    def plot_metrics(self):
        """Plots metrics from a CSV file."""
        plot_results(file=self.csv, on_plot=self.on_plot)  # save results.png

    def plot_training_labels(self):
        """Create a labeled training plot of the YOLO model."""
        boxes = np.concatenate([lb['bboxes'] for lb in self.train_loader.dataset.labels], 0)
        cls = np.concatenate([lb['cls'] for lb in self.train_loader.dataset.labels], 0)
        plot_labels(boxes, cls.squeeze(), names=self.data['names'], save_dir=self.save_dir, on_plot=self.on_plot)
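

# Minimal usage sketch (mirrors the class docstring example; assumes the 'yolov8n.pt'
# weights and the 'coco8.yaml' dataset are resolvable locally by Ultralytics):
if __name__ == '__main__':
    args = dict(model='yolov8n.pt', data='coco8.yaml', epochs=3)
    trainer = DetectionTrainer(overrides=args)
    trainer.train()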