# predict.py
  1. # Ultralytics YOLO 🚀, AGPL-3.0 license
  2. import numpy as np
  3. import torch
  4. import torch.nn.functional as F
  5. import torchvision
  6. from ultralytics.data.augment import LetterBox
  7. from ultralytics.engine.predictor import BasePredictor
  8. from ultralytics.engine.results import Results
  9. from ultralytics.utils import DEFAULT_CFG, ops
  10. from ultralytics.utils.torch_utils import select_device
  11. from .amg import (batch_iterator, batched_mask_to_box, build_all_layer_point_grids, calculate_stability_score,
  12. generate_crop_boxes, is_box_near_crop_edge, remove_small_regions, uncrop_boxes_xyxy, uncrop_masks)
  13. from .build import build_sam
  14. class Predictor(BasePredictor):
  15. def __init__(self, cfg=DEFAULT_CFG, overrides=None, _callbacks=None):
  16. if overrides is None:
  17. overrides = {}
  18. overrides.update(dict(task='segment', mode='predict', imgsz=1024))
  19. super().__init__(cfg, overrides, _callbacks)
  20. # SAM needs retina_masks=True, or the results would be a mess.
  21. self.args.retina_masks = True
  22. # Args for set_image
  23. self.im = None
  24. self.features = None
  25. # Args for set_prompts
  26. self.prompts = {}
  27. # Args for segment everything
  28. self.segment_all = False
  29. def preprocess(self, im):
  30. """Prepares input image before inference.
  31. Args:
  32. im (torch.Tensor | List(np.ndarray)): BCHW for tensor, [(HWC) x B] for list.
  33. """
  34. if self.im is not None:
  35. return self.im
  36. not_tensor = not isinstance(im, torch.Tensor)
  37. if not_tensor:
  38. im = np.stack(self.pre_transform(im))
  39. im = im[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW, (n, 3, h, w)
  40. im = np.ascontiguousarray(im) # contiguous
  41. im = torch.from_numpy(im)
  42. img = im.to(self.device)
  43. img = img.half() if self.model.fp16 else img.float() # uint8 to fp16/32
  44. if not_tensor:
  45. img = (img - self.mean) / self.std
  46. return img
  47. def pre_transform(self, im):
  48. """
  49. Pre-transform input image before inference.
  50. Args:
  51. im (List(np.ndarray)): (N, 3, h, w) for tensor, [(h, w, 3) x N] for list.
  52. Returns:
  53. (list): A list of transformed images.
  54. """
  55. assert len(im) == 1, 'SAM model has not supported batch inference yet!'
  56. return [LetterBox(self.args.imgsz, auto=False, center=False)(image=x) for x in im]
  57. def inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False, *args, **kwargs):
  58. """
  59. Predict masks for the given input prompts, using the currently set image.
  60. Args:
  61. im (torch.Tensor): The preprocessed image, (N, C, H, W).
  62. bboxes (np.ndarray | List, None): (N, 4), in XYXY format.
  63. points (np.ndarray | List, None): (N, 2), Each point is in (X,Y) in pixels.
  64. labels (np.ndarray | List, None): (N, ), labels for the point prompts.
  65. 1 indicates a foreground point and 0 indicates a background point.
  66. masks (np.ndarray, None): A low resolution mask input to the model, typically
  67. coming from a previous prediction iteration. Has form (N, H, W), where
  68. for SAM, H=W=256.
  69. multimask_output (bool): If true, the model will return three masks.
  70. For ambiguous input prompts (such as a single click), this will often
  71. produce better masks than a single prediction. If only a single
  72. mask is needed, the model's predicted quality score can be used
  73. to select the best mask. For non-ambiguous prompts, such as multiple
  74. input prompts, multimask_output=False can give better results.
  75. Returns:
  76. (np.ndarray): The output masks in CxHxW format, where C is the
  77. number of masks, and (H, W) is the original image size.
  78. (np.ndarray): An array of length C containing the model's
  79. predictions for the quality of each mask.
  80. (np.ndarray): An array of shape CxHxW, where C is the number
  81. of masks and H=W=256. These low resolution logits can be passed to
  82. a subsequent iteration as mask input.
  83. """
  84. # Get prompts from self.prompts first
  85. bboxes = self.prompts.pop('bboxes', bboxes)
  86. points = self.prompts.pop('points', points)
  87. masks = self.prompts.pop('masks', masks)
  88. if all(i is None for i in [bboxes, points, masks]):
  89. return self.generate(im, *args, **kwargs)
  90. return self.prompt_inference(im, bboxes, points, labels, masks, multimask_output)
  91. def prompt_inference(self, im, bboxes=None, points=None, labels=None, masks=None, multimask_output=False):
  92. """
  93. Predict masks for the given input prompts, using the currently set image.
  94. Args:
  95. im (torch.Tensor): The preprocessed image, (N, C, H, W).
  96. bboxes (np.ndarray | List, None): (N, 4), in XYXY format.
  97. points (np.ndarray | List, None): (N, 2), Each point is in (X,Y) in pixels.
  98. labels (np.ndarray | List, None): (N, ), labels for the point prompts.
  99. 1 indicates a foreground point and 0 indicates a background point.
  100. masks (np.ndarray, None): A low resolution mask input to the model, typically
  101. coming from a previous prediction iteration. Has form (N, H, W), where
  102. for SAM, H=W=256.
  103. multimask_output (bool): If true, the model will return three masks.
  104. For ambiguous input prompts (such as a single click), this will often
  105. produce better masks than a single prediction. If only a single
  106. mask is needed, the model's predicted quality score can be used
  107. to select the best mask. For non-ambiguous prompts, such as multiple
  108. input prompts, multimask_output=False can give better results.
  109. Returns:
  110. (np.ndarray): The output masks in CxHxW format, where C is the
  111. number of masks, and (H, W) is the original image size.
  112. (np.ndarray): An array of length C containing the model's
  113. predictions for the quality of each mask.
  114. (np.ndarray): An array of shape CxHxW, where C is the number
  115. of masks and H=W=256. These low resolution logits can be passed to
  116. a subsequent iteration as mask input.
  117. """
  118. features = self.model.image_encoder(im) if self.features is None else self.features
  119. src_shape, dst_shape = self.batch[1][0].shape[:2], im.shape[2:]
  120. r = 1.0 if self.segment_all else min(dst_shape[0] / src_shape[0], dst_shape[1] / src_shape[1])
  121. # Transform input prompts
  122. if points is not None:
  123. points = torch.as_tensor(points, dtype=torch.float32, device=self.device)
  124. points = points[None] if points.ndim == 1 else points
  125. # Assuming labels are all positive if users don't pass labels.
  126. if labels is None:
  127. labels = np.ones(points.shape[0])
  128. labels = torch.as_tensor(labels, dtype=torch.int32, device=self.device)
  129. points *= r
  130. # (N, 2) --> (N, 1, 2), (N, ) --> (N, 1)
  131. points, labels = points[:, None, :], labels[:, None]
  132. if bboxes is not None:
  133. bboxes = torch.as_tensor(bboxes, dtype=torch.float32, device=self.device)
  134. bboxes = bboxes[None] if bboxes.ndim == 1 else bboxes
  135. bboxes *= r
  136. if masks is not None:
  137. masks = torch.as_tensor(masks, dtype=torch.float32, device=self.device)
  138. masks = masks[:, None, :, :]
  139. points = (points, labels) if points is not None else None
  140. # Embed prompts
  141. sparse_embeddings, dense_embeddings = self.model.prompt_encoder(
  142. points=points,
  143. boxes=bboxes,
  144. masks=masks,
  145. )
  146. # Predict masks
  147. pred_masks, pred_scores = self.model.mask_decoder(
  148. image_embeddings=features,
  149. image_pe=self.model.prompt_encoder.get_dense_pe(),
  150. sparse_prompt_embeddings=sparse_embeddings,
  151. dense_prompt_embeddings=dense_embeddings,
  152. multimask_output=multimask_output,
  153. )
  154. # (N, d, H, W) --> (N*d, H, W), (N, d) --> (N*d, )
  155. # `d` could be 1 or 3 depends on `multimask_output`.
  156. return pred_masks.flatten(0, 1), pred_scores.flatten(0, 1)
  157. def generate(self,
  158. im,
  159. crop_n_layers=0,
  160. crop_overlap_ratio=512 / 1500,
  161. crop_downscale_factor=1,
  162. point_grids=None,
  163. points_stride=32,
  164. points_batch_size=64,
  165. conf_thres=0.88,
  166. stability_score_thresh=0.95,
  167. stability_score_offset=0.95,
  168. crop_nms_thresh=0.7):
  169. """Segment the whole image.
  170. Args:
  171. im (torch.Tensor): The preprocessed image, (N, C, H, W).
  172. crop_n_layers (int): If >0, mask prediction will be run again on
  173. crops of the image. Sets the number of layers to run, where each
  174. layer has 2**i_layer number of image crops.
  175. crop_overlap_ratio (float): Sets the degree to which crops overlap.
  176. In the first crop layer, crops will overlap by this fraction of
  177. the image length. Later layers with more crops scale down this overlap.
  178. crop_downscale_factor (int): The number of points-per-side
  179. sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
  180. point_grids (list(np.ndarray), None): A list over explicit grids
  181. of points used for sampling, normalized to [0,1]. The nth grid in the
  182. list is used in the nth crop layer. Exclusive with points_per_side.
  183. points_stride (int, None): The number of points to be sampled
  184. along one side of the image. The total number of points is
  185. points_per_side**2. If None, 'point_grids' must provide explicit
  186. point sampling.
  187. points_batch_size (int): Sets the number of points run simultaneously
  188. by the model. Higher numbers may be faster but use more GPU memory.
  189. conf_thres (float): A filtering threshold in [0,1], using the
  190. model's predicted mask quality.
  191. stability_score_thresh (float): A filtering threshold in [0,1], using
  192. the stability of the mask under changes to the cutoff used to binarize
  193. the model's mask predictions.
  194. stability_score_offset (float): The amount to shift the cutoff when
  195. calculated the stability score.
  196. crop_nms_thresh (float): The box IoU cutoff used by non-maximal
  197. suppression to filter duplicate masks between different crops.
  198. """
  199. self.segment_all = True
  200. ih, iw = im.shape[2:]
  201. crop_regions, layer_idxs = generate_crop_boxes((ih, iw), crop_n_layers, crop_overlap_ratio)
  202. if point_grids is None:
  203. point_grids = build_all_layer_point_grids(
  204. points_stride,
  205. crop_n_layers,
  206. crop_downscale_factor,
  207. )
  208. pred_masks, pred_scores, pred_bboxes, region_areas = [], [], [], []
  209. for crop_region, layer_idx in zip(crop_regions, layer_idxs):
  210. x1, y1, x2, y2 = crop_region
  211. w, h = x2 - x1, y2 - y1
  212. area = torch.tensor(w * h, device=im.device)
  213. points_scale = np.array([[w, h]]) # w, h
  214. # Crop image and interpolate to input size
  215. crop_im = F.interpolate(im[..., y1:y2, x1:x2], (ih, iw), mode='bilinear', align_corners=False)
  216. # (num_points, 2)
  217. points_for_image = point_grids[layer_idx] * points_scale
  218. crop_masks, crop_scores, crop_bboxes = [], [], []
  219. for (points, ) in batch_iterator(points_batch_size, points_for_image):
  220. pred_mask, pred_score = self.prompt_inference(crop_im, points=points, multimask_output=True)
  221. # Interpolate predicted masks to input size
  222. pred_mask = F.interpolate(pred_mask[None], (h, w), mode='bilinear', align_corners=False)[0]
  223. idx = pred_score > conf_thres
  224. pred_mask, pred_score = pred_mask[idx], pred_score[idx]
  225. stability_score = calculate_stability_score(pred_mask, self.model.mask_threshold,
  226. stability_score_offset)
  227. idx = stability_score > stability_score_thresh
  228. pred_mask, pred_score = pred_mask[idx], pred_score[idx]
  229. # Bool type is much more memory-efficient.
  230. pred_mask = pred_mask > self.model.mask_threshold
  231. # (N, 4)
  232. pred_bbox = batched_mask_to_box(pred_mask).float()
  233. keep_mask = ~is_box_near_crop_edge(pred_bbox, crop_region, [0, 0, iw, ih])
  234. if not torch.all(keep_mask):
  235. pred_bbox = pred_bbox[keep_mask]
  236. pred_mask = pred_mask[keep_mask]
  237. pred_score = pred_score[keep_mask]
  238. crop_masks.append(pred_mask)
  239. crop_bboxes.append(pred_bbox)
  240. crop_scores.append(pred_score)
  241. # Do nms within this crop
  242. crop_masks = torch.cat(crop_masks)
  243. crop_bboxes = torch.cat(crop_bboxes)
  244. crop_scores = torch.cat(crop_scores)
  245. keep = torchvision.ops.nms(crop_bboxes, crop_scores, self.args.iou) # NMS
  246. crop_bboxes = uncrop_boxes_xyxy(crop_bboxes[keep], crop_region)
  247. crop_masks = uncrop_masks(crop_masks[keep], crop_region, ih, iw)
  248. crop_scores = crop_scores[keep]
  249. pred_masks.append(crop_masks)
  250. pred_bboxes.append(crop_bboxes)
  251. pred_scores.append(crop_scores)
  252. region_areas.append(area.expand(len(crop_masks)))
  253. pred_masks = torch.cat(pred_masks)
  254. pred_bboxes = torch.cat(pred_bboxes)
  255. pred_scores = torch.cat(pred_scores)
  256. region_areas = torch.cat(region_areas)
  257. # Remove duplicate masks between crops
  258. if len(crop_regions) > 1:
  259. scores = 1 / region_areas
  260. keep = torchvision.ops.nms(pred_bboxes, scores, crop_nms_thresh)
  261. pred_masks = pred_masks[keep]
  262. pred_bboxes = pred_bboxes[keep]
  263. pred_scores = pred_scores[keep]
  264. return pred_masks, pred_scores, pred_bboxes
  265. def setup_model(self, model, verbose=True):
  266. """Set up YOLO model with specified thresholds and device."""
  267. device = select_device(self.args.device, verbose=verbose)
  268. if model is None:
  269. model = build_sam(self.args.model)
  270. model.eval()
  271. self.model = model.to(device)
  272. self.device = device
  273. self.mean = torch.tensor([123.675, 116.28, 103.53]).view(-1, 1, 1).to(device)
  274. self.std = torch.tensor([58.395, 57.12, 57.375]).view(-1, 1, 1).to(device)
  275. # TODO: Temporary settings for compatibility
  276. self.model.pt = False
  277. self.model.triton = False
  278. self.model.stride = 32
  279. self.model.fp16 = False
  280. self.done_warmup = True
  281. def postprocess(self, preds, img, orig_imgs):
  282. """Post-processes inference output predictions to create detection masks for objects."""
  283. # (N, 1, H, W), (N, 1)
  284. pred_masks, pred_scores = preds[:2]
  285. pred_bboxes = preds[2] if self.segment_all else None
  286. names = dict(enumerate(str(i) for i in range(len(pred_masks))))
  287. results = []
  288. is_list = isinstance(orig_imgs, list) # input images are a list, not a torch.Tensor
  289. for i, masks in enumerate([pred_masks]):
  290. orig_img = orig_imgs[i] if is_list else orig_imgs
  291. if pred_bboxes is not None:
  292. pred_bboxes = ops.scale_boxes(img.shape[2:], pred_bboxes.float(), orig_img.shape, padding=False)
  293. cls = torch.arange(len(pred_masks), dtype=torch.int32, device=pred_masks.device)
  294. pred_bboxes = torch.cat([pred_bboxes, pred_scores[:, None], cls[:, None]], dim=-1)
  295. masks = ops.scale_masks(masks[None].float(), orig_img.shape[:2], padding=False)[0]
  296. masks = masks > self.model.mask_threshold # to bool
  297. img_path = self.batch[0][i]
  298. results.append(Results(orig_img, path=img_path, names=names, masks=masks, boxes=pred_bboxes))
  299. # Reset segment-all mode.
  300. self.segment_all = False
  301. return results
  302. def setup_source(self, source):
  303. """Sets up source and inference mode."""
  304. if source is not None:
  305. super().setup_source(source)
  306. def set_image(self, image):
  307. """Set image in advance.
  308. Args:
  309. image (str | np.ndarray): image file path or np.ndarray image by cv2.
  310. """
  311. if self.model is None:
  312. model = build_sam(self.args.model)
  313. self.setup_model(model)
  314. self.setup_source(image)
  315. assert len(self.dataset) == 1, '`set_image` only supports setting one image!'
  316. for batch in self.dataset:
  317. im = self.preprocess(batch[1])
  318. self.features = self.model.image_encoder(im)
  319. self.im = im
  320. break
  321. def set_prompts(self, prompts):
  322. """Set prompts in advance."""
  323. self.prompts = prompts
  324. def reset_image(self):
  325. self.im = None
  326. self.features = None
  327. @staticmethod
  328. def remove_small_regions(masks, min_area=0, nms_thresh=0.7):
  329. """
  330. Removes small disconnected regions and holes in masks, then reruns
  331. box NMS to remove any new duplicates. Requires open-cv as a dependency.
  332. Args:
  333. masks (torch.Tensor): Masks, (N, H, W).
  334. min_area (int): Minimum area threshold.
  335. nms_thresh (float): NMS threshold.
  336. """
  337. if len(masks) == 0:
  338. return masks
  339. # Filter small disconnected regions and holes
  340. new_masks = []
  341. scores = []
  342. for mask in masks:
  343. mask = mask.cpu().numpy()
  344. mask, changed = remove_small_regions(mask, min_area, mode='holes')
  345. unchanged = not changed
  346. mask, changed = remove_small_regions(mask, min_area, mode='islands')
  347. unchanged = unchanged and not changed
  348. new_masks.append(torch.as_tensor(mask).unsqueeze(0))
  349. # Give score=0 to changed masks and score=1 to unchanged masks
  350. # so NMS will prefer ones that didn't need postprocessing
  351. scores.append(float(unchanged))
  352. # Recalculate boxes and remove any new duplicates
  353. new_masks = torch.cat(new_masks, dim=0)
  354. boxes = batched_mask_to_box(new_masks)
  355. keep = torchvision.ops.nms(
  356. boxes.float(),
  357. torch.as_tensor(scores),
  358. nms_thresh,
  359. )
  360. # Only recalculate masks for masks that have changed
  361. for i in keep:
  362. if scores[i] == 0.0:
  363. masks[i] = new_masks[i]
  364. return masks[keep]