# _stereo_matching.py

import functools
import json
import os
import random
import shutil
from abc import ABC, abstractmethod
from glob import glob
from pathlib import Path
from typing import Callable, cast, List, Optional, Tuple, Union

import numpy as np
from PIL import Image

from .utils import _read_pfm, download_and_extract_archive, verify_str_arg
from .vision import VisionDataset

T1 = Tuple[Image.Image, Image.Image, Optional[np.ndarray], np.ndarray]
T2 = Tuple[Image.Image, Image.Image, Optional[np.ndarray]]

__all__ = ()

_read_pfm_file = functools.partial(_read_pfm, slice_channels=1)


class StereoMatchingDataset(ABC, VisionDataset):
    """Base interface for Stereo matching datasets"""

    _has_built_in_disparity_mask = False

    def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
        """
        Args:
            root(str): Root directory of the dataset.
            transforms(callable, optional): A function/transform that takes in Tuples of
                (images, disparities, valid_masks) and returns a transformed version of each of them.
                images is a Tuple of (``PIL.Image``, ``PIL.Image``)
                disparities is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (1, H, W)
                valid_masks is a Tuple of (``np.ndarray``, ``np.ndarray``) with shape (H, W)
                In some cases, when a dataset does not provide disparities, the ``disparities`` and
                ``valid_masks`` can be Tuples containing None values.

                For training splits, the datasets generally provide a minimal guarantee of:

                    images: (``PIL.Image``, ``PIL.Image``)
                    disparities: (``np.ndarray``, ``None``) with shape (1, H, W)

                Optionally, based on the dataset, it can return a ``mask`` as well:

                    valid_masks: (``np.ndarray | None``, ``None``) with shape (H, W)

                For some test splits, the datasets provide outputs that look like:

                    images: (``PIL.Image``, ``PIL.Image``)
                    disparities: (``None``, ``None``)

                Optionally, based on the dataset, it can return a ``mask`` as well:

                    valid_masks: (``None``, ``None``)
        """
        super().__init__(root=root)
        self.transforms = transforms

        self._images = []  # type: ignore
        self._disparities = []  # type: ignore

    def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
        img = Image.open(file_path)
        if img.mode != "RGB":
            img = img.convert("RGB")
        return img

    def _scan_pairs(
        self,
        paths_left_pattern: str,
        paths_right_pattern: Optional[str] = None,
    ) -> List[Tuple[str, Optional[str]]]:
        left_paths = list(sorted(glob(paths_left_pattern)))

        right_paths: List[Union[None, str]]
        if paths_right_pattern:
            right_paths = list(sorted(glob(paths_right_pattern)))
        else:
            right_paths = list(None for _ in left_paths)

        if not left_paths:
            raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_left_pattern}")

        if not right_paths:
            raise FileNotFoundError(f"Could not find any files matching the patterns: {paths_right_pattern}")

        if len(left_paths) != len(right_paths):
            raise ValueError(
                f"Found {len(left_paths)} left files but {len(right_paths)} right files using:\n "
                f"left pattern: {paths_left_pattern}\n"
                f"right pattern: {paths_right_pattern}\n"
            )

        paths = list((left, right) for left, right in zip(left_paths, right_paths))
        return paths

    @abstractmethod
    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
        # function that returns a disparity map and an occlusion map
        pass

    def __getitem__(self, index: int) -> Union[T1, T2]:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3- or 4-tuple with ``(img_left, img_right, disparity, Optional[valid_mask])`` where ``valid_mask``
                can be a numpy boolean mask of shape (H, W) if the dataset provides a file
                indicating which disparity pixels are valid. The disparity is a numpy array of
                shape (1, H, W) and the images are PIL images. ``disparity`` is None for
                datasets where the authors did not provide annotations for ``split="test"``.
        """
        img_left = self._read_img(self._images[index][0])
        img_right = self._read_img(self._images[index][1])

        dsp_map_left, valid_mask_left = self._read_disparity(self._disparities[index][0])
        dsp_map_right, valid_mask_right = self._read_disparity(self._disparities[index][1])

        imgs = (img_left, img_right)
        dsp_maps = (dsp_map_left, dsp_map_right)
        valid_masks = (valid_mask_left, valid_mask_right)

        if self.transforms is not None:
            (
                imgs,
                dsp_maps,
                valid_masks,
            ) = self.transforms(imgs, dsp_maps, valid_masks)

        if self._has_built_in_disparity_mask or valid_masks[0] is not None:
            return imgs[0], imgs[1], dsp_maps[0], cast(np.ndarray, valid_masks[0])
        else:
            return imgs[0], imgs[1], dsp_maps[0]

    def __len__(self) -> int:
        return len(self._images)
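

# Sketch of a minimal concrete subclass (hypothetical directory layout, illustrative only):
# a subclass fills ``self._images`` / ``self._disparities`` with (left, right) path pairs,
# typically via ``_scan_pairs``, and implements ``_read_disparity``; ``__getitem__`` and
# ``__len__`` above then work unchanged.
#
#     class MyStereo(StereoMatchingDataset):
#         def __init__(self, root, transforms=None):
#             super().__init__(root, transforms)
#             self._images = self._scan_pairs(f"{root}/*/left.png", f"{root}/*/right.png")
#             self._disparities = self._scan_pairs(f"{root}/*/disp.pfm", None)
#
#         def _read_disparity(self, file_path):
#             if file_path is None:
#                 return None, None
#             disparity = np.abs(_read_pfm_file(file_path))  # (1, H, W) float array
#             return disparity, None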


class CarlaStereo(StereoMatchingDataset):
    """
    Carla simulator data linked in the `CREStereo github repo <https://github.com/megvii-research/CREStereo>`_.

    The dataset is expected to have the following structure: ::

        root
            carla-highres
                trainingF
                    scene1
                        im0.png
                        im1.png
                        disp0GT.pfm
                        disp1GT.pfm
                        calib.txt
                    scene2
                        im0.png
                        im1.png
                        disp0GT.pfm
                        disp1GT.pfm
                        calib.txt
                    ...

    Args:
        root (string): Root directory where `carla-highres` is located.
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(self, root: str, transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "carla-highres"

        left_image_pattern = str(root / "trainingF" / "*" / "im0.png")
        right_image_pattern = str(root / "trainingF" / "*" / "im1.png")
        imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
        self._images = imgs

        left_disparity_pattern = str(root / "trainingF" / "*" / "disp0GT.pfm")
        right_disparity_pattern = str(root / "trainingF" / "*" / "disp1GT.pfm")
        disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
        self._disparities = disparities

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))
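

# Hypothetical usage sketch (assumes the CARLA scenes were extracted under "/datasets"):
#
#     dataset = CarlaStereo(root="/datasets")
#     img_left, img_right, disparity = dataset[0]   # disparity has shape (1, H, W)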


class Kitti2012Stereo(StereoMatchingDataset):
    """
    KITTI dataset from the `2012 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_stereo_flow.php>`_.
    Uses the RGB images for consistency with KITTI 2015.

    The dataset is expected to have the following structure: ::

        root
            Kitti2012
                testing
                    colored_0
                        1_10.png
                        2_10.png
                        ...
                    colored_1
                        1_10.png
                        2_10.png
                        ...
                training
                    colored_0
                        1_10.png
                        2_10.png
                        ...
                    colored_1
                        1_10.png
                        2_10.png
                        ...
                    disp_noc
                        1.png
                        2.png
                        ...
                    calib

    Args:
        root (string): Root directory where `Kitti2012` is located.
        split (string, optional): The dataset split of scenes, either "train" (default) or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    _has_built_in_disparity_mask = True

    def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "Kitti2012" / (split + "ing")

        left_img_pattern = str(root / "colored_0" / "*_10.png")
        right_img_pattern = str(root / "colored_1" / "*_10.png")
        self._images = self._scan_pairs(left_img_pattern, right_img_pattern)

        if split == "train":
            disparity_pattern = str(root / "disp_noc" / "*.png")
            self._disparities = self._scan_pairs(disparity_pattern, None)
        else:
            self._disparities = list((None, None) for _ in self._images)

    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        disparity_map = np.asarray(Image.open(file_path)) / 256.0
        # unsqueeze the disparity map into (C, H, W) format
        disparity_map = disparity_map[None, :, :]
        valid_mask = None
        return disparity_map, valid_mask
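
    # Note (KITTI devkit convention): disparities ship as 16-bit PNGs scaled by 256, so a raw
    # pixel value of 25600 decodes to 100.0 px above; a stored value of 0 marks an invalid pixel.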

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
                Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
        """
        return cast(T1, super().__getitem__(index))


class Kitti2015Stereo(StereoMatchingDataset):
    """
    KITTI dataset from the `2015 stereo evaluation benchmark <http://www.cvlibs.net/datasets/kitti/eval_scene_flow.php>`_.

    The dataset is expected to have the following structure: ::

        root
            Kitti2015
                testing
                    image_2
                        img1.png
                        img2.png
                        ...
                    image_3
                        img1.png
                        img2.png
                        ...
                training
                    image_2
                        img1.png
                        img2.png
                        ...
                    image_3
                        img1.png
                        img2.png
                        ...
                    disp_occ_0
                        img1.png
                        img2.png
                        ...
                    disp_occ_1
                        img1.png
                        img2.png
                        ...
                    calib

    Args:
        root (string): Root directory where `Kitti2015` is located.
        split (string, optional): The dataset split of scenes, either "train" (default) or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    _has_built_in_disparity_mask = True

    def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "Kitti2015" / (split + "ing")
        left_img_pattern = str(root / "image_2" / "*.png")
        right_img_pattern = str(root / "image_3" / "*.png")
        self._images = self._scan_pairs(left_img_pattern, right_img_pattern)

        if split == "train":
            left_disparity_pattern = str(root / "disp_occ_0" / "*.png")
            right_disparity_pattern = str(root / "disp_occ_1" / "*.png")
            self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
        else:
            self._disparities = list((None, None) for _ in self._images)

    def _read_disparity(self, file_path: str) -> Tuple[Optional[np.ndarray], None]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        disparity_map = np.asarray(Image.open(file_path)) / 256.0
        # unsqueeze the disparity map into (C, H, W) format
        disparity_map = disparity_map[None, :, :]
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
                Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
        """
        return cast(T1, super().__getitem__(index))


class Middlebury2014Stereo(StereoMatchingDataset):
    """Publicly available scenes from the Middlebury dataset `2014 version <https://vision.middlebury.edu/stereo/data/scenes2014/>`.

    The dataset mostly follows the original format, without containing the ambient subdirectories: ::

        root
            Middlebury2014
                train
                    scene1-{perfect,imperfect}
                        calib.txt
                        im{0,1}.png
                        im1E.png
                        im1L.png
                        disp{0,1}.pfm
                        disp{0,1}-n.png
                        disp{0,1}-sd.pfm
                        disp{0,1}y.pfm
                    scene2-{perfect,imperfect}
                        calib.txt
                        im{0,1}.png
                        im1E.png
                        im1L.png
                        disp{0,1}.pfm
                        disp{0,1}-n.png
                        disp{0,1}-sd.pfm
                        disp{0,1}y.pfm
                    ...
                additional
                    scene1-{perfect,imperfect}
                        calib.txt
                        im{0,1}.png
                        im1E.png
                        im1L.png
                        disp{0,1}.pfm
                        disp{0,1}-n.png
                        disp{0,1}-sd.pfm
                        disp{0,1}y.pfm
                    ...
                test
                    scene1
                        calib.txt
                        im{0,1}.png
                    scene2
                        calib.txt
                        im{0,1}.png
                    ...

    Args:
        root (string): Root directory of the Middlebury 2014 Dataset.
        split (string, optional): The dataset split of scenes, either "train" (default), "test", or "additional"
        use_ambient_views (boolean, optional): Whether to use different exposure or lighting views when possible.
            The dataset samples with equal probability between ``[im1.png, im1E.png, im1L.png]``.
        calibration (string, optional): Whether to use the calibrated (default) or uncalibrated scenes.
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
        download (boolean, optional): Whether or not to download the dataset in the ``root`` directory.
    """

    splits = {
        "train": [
            "Adirondack",
            "Jadeplant",
            "Motorcycle",
            "Piano",
            "Pipes",
            "Playroom",
            "Playtable",
            "Recycle",
            "Shelves",
            "Vintage",
        ],
        "additional": [
            "Backpack",
            "Bicycle1",
            "Cable",
            "Classroom1",
            "Couch",
            "Flowers",
            "Mask",
            "Shopvac",
            "Sticks",
            "Storage",
            "Sword1",
            "Sword2",
            "Umbrella",
        ],
        "test": [
            "Plants",
            "Classroom2E",
            "Classroom2",
            "Australia",
            "DjembeL",
            "CrusadeP",
            "Crusade",
            "Hoops",
            "Bicycle2",
            "Staircase",
            "Newkuba",
            "AustraliaP",
            "Djembe",
            "Livingroom",
            "Computer",
        ],
    }

    _has_built_in_disparity_mask = True

    def __init__(
        self,
        root: str,
        split: str = "train",
        calibration: Optional[str] = "perfect",
        use_ambient_views: bool = False,
        transforms: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test", "additional"))
        self.split = split

        if calibration:
            verify_str_arg(calibration, "calibration", valid_values=("perfect", "imperfect", "both", None))  # type: ignore
            if split == "test":
                raise ValueError("Split 'test' has no calibration settings, please set `calibration=None`.")
        else:
            if split != "test":
                raise ValueError(
                    f"Split '{split}' has calibration settings, however None was provided as an argument."
                    f"\nSetting calibration to 'perfect' for split '{split}'. Available calibration settings are: 'perfect', 'imperfect', 'both'.",
                )

        if download:
            self._download_dataset(root)

        root = Path(root) / "Middlebury2014"

        if not os.path.exists(root / split):
            raise FileNotFoundError(f"The {split} directory was not found in the provided root directory")

        split_scenes = self.splits[split]
        # check that the provided root folder contains the scene splits
        if not any(
            # using startswith to account for perfect / imperfect calibration
            scene.startswith(s)
            for scene in os.listdir(root / split)
            for s in split_scenes
        ):
            raise FileNotFoundError(f"Provided root folder does not contain any scenes from the {split} split.")

        calibration_suffixes = {
            None: [""],
            "perfect": ["-perfect"],
            "imperfect": ["-imperfect"],
            "both": ["-perfect", "-imperfect"],
        }[calibration]

        for calibration_suffix in calibration_suffixes:
            scene_pattern = "*" + calibration_suffix
            left_img_pattern = str(root / split / scene_pattern / "im0.png")
            right_img_pattern = str(root / split / scene_pattern / "im1.png")
            self._images += self._scan_pairs(left_img_pattern, right_img_pattern)

            if split == "test":
                self._disparities = list((None, None) for _ in self._images)
            else:
                left_disparity_pattern = str(root / split / scene_pattern / "disp0.pfm")
                right_disparity_pattern = str(root / split / scene_pattern / "disp1.pfm")
                self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

        self.use_ambient_views = use_ambient_views

    def _read_img(self, file_path: Union[str, Path]) -> Image.Image:
        """
        Function that reads either the original right image or an augmented view when ``use_ambient_views`` is True.
        When ``use_ambient_views`` is True, the dataset will return at random one of ``[im1.png, im1E.png, im1L.png]``
        as the right image.
        """
        ambient_file_paths: List[Union[str, Path]]  # make mypy happy
        if not isinstance(file_path, Path):
            file_path = Path(file_path)

        if file_path.name == "im1.png" and self.use_ambient_views:
            base_path = file_path.parent
            # initialize sampleable container
            ambient_file_paths = list(base_path / view_name for view_name in ["im1E.png", "im1L.png"])
            # double check that we're not going to try to read from an invalid file path
            ambient_file_paths = list(filter(lambda p: os.path.exists(p), ambient_file_paths))
            # keep the original image as an option as well for uniform sampling between base views
            ambient_file_paths.append(file_path)
            file_path = random.choice(ambient_file_paths)  # type: ignore
        return super()._read_img(file_path)

    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        disparity_map[disparity_map == np.inf] = 0  # remove infinite disparities
        valid_mask = (disparity_map > 0).squeeze(0)  # mask out invalid disparities
        return disparity_map, valid_mask

    def _download_dataset(self, root: str) -> None:
        base_url = "https://vision.middlebury.edu/stereo/data/scenes2014/zip"
        # train and additional splits have 2 different calibration settings
        root = Path(root) / "Middlebury2014"
        split_name = self.split

        if split_name != "test":
            for split_scene in self.splits[split_name]:
                split_root = root / split_name
                for calibration in ["perfect", "imperfect"]:
                    scene_name = f"{split_scene}-{calibration}"
                    scene_url = f"{base_url}/{scene_name}.zip"
                    print(f"Downloading {scene_url}")
                    # download the scene only if it doesn't exist
                    if not (split_root / scene_name).exists():
                        download_and_extract_archive(
                            url=scene_url,
                            filename=f"{scene_name}.zip",
                            download_root=str(split_root),
                            remove_finished=True,
                        )
        else:
            os.makedirs(root / "test")
            if any(s not in os.listdir(root / "test") for s in self.splits["test"]):
                # test split is downloaded from a different location
                test_set_url = "https://vision.middlebury.edu/stereo/submit3/zip/MiddEval3-data-F.zip"
                # the unzip is going to produce a directory MiddEval3 with two subdirectories trainingF and testF
                # we want to move the contents from testF into the directory
                download_and_extract_archive(url=test_set_url, download_root=str(root), remove_finished=True)
                for scene_dir, scene_names, _ in os.walk(str(root / "MiddEval3/testF")):
                    for scene in scene_names:
                        scene_dst_dir = root / "test"
                        scene_src_dir = Path(scene_dir) / scene
                        os.makedirs(scene_dst_dir, exist_ok=True)
                        shutil.move(str(scene_src_dir), str(scene_dst_dir))
                # cleanup MiddEval3 directory
                shutil.rmtree(str(root / "MiddEval3"))

    def __getitem__(self, index: int) -> T2:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` for `split=test`.
        """
        return cast(T2, super().__getitem__(index))
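

# Hypothetical usage sketch (assumes network access and a writable "/datasets" directory);
# with ``calibration="both"`` each scene contributes both a perfect and an imperfect sample:
#
#     dataset = Middlebury2014Stereo(root="/datasets", split="train", calibration="both", download=True)
#     img_left, img_right, disparity, valid_mask = dataset[0]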


class CREStereo(StereoMatchingDataset):
    """Synthetic dataset used in training the `CREStereo <https://arxiv.org/pdf/2203.11483.pdf>`_ architecture.

    Dataset details on the official paper `repo <https://github.com/megvii-research/CREStereo>`_.

    The dataset is expected to have the following structure: ::

        root
            CREStereo
                tree
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    img2_left.jpg
                    img2_right.jpg
                    img2_left.disp.png
                    img2_right.disp.png
                    ...
                shapenet
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    ...
                reflective
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    ...
                hole
                    img1_left.jpg
                    img1_right.jpg
                    img1_left.disp.png
                    img1_right.disp.png
                    ...

    Args:
        root (str): Root directory of the dataset.
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    _has_built_in_disparity_mask = True

    def __init__(
        self,
        root: str,
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "CREStereo"

        dirs = ["shapenet", "reflective", "tree", "hole"]

        for s in dirs:
            left_image_pattern = str(root / s / "*_left.jpg")
            right_image_pattern = str(root / s / "*_right.jpg")
            imgs = self._scan_pairs(left_image_pattern, right_image_pattern)
            self._images += imgs

            left_disparity_pattern = str(root / s / "*_left.disp.png")
            right_disparity_pattern = str(root / s / "*_right.disp.png")
            disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)
            self._disparities += disparities

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        # unsqueeze the disparity map into (C, H, W) format
        disparity_map = disparity_map[None, :, :] / 32.0
        valid_mask = None
        return disparity_map, valid_mask
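
    # Note: the disparity PNGs store values in fixed point at 1/32 px resolution, hence the
    # division by 32.0 above (e.g. a stored value of 640 decodes to 20.0 px).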

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
        """
        return cast(T1, super().__getitem__(index))


class FallingThingsStereo(StereoMatchingDataset):
    """`FallingThings <https://research.nvidia.com/publication/2018-06_falling-things-synthetic-dataset-3d-object-detection-and-pose-estimation>`_ dataset.

    The dataset is expected to have the following structure: ::

        root
            FallingThings
                single
                    dir1
                        scene1
                            _object_settings.json
                            _camera_settings.json
                            image1.left.depth.png
                            image1.right.depth.png
                            image1.left.jpg
                            image1.right.jpg
                            image2.left.depth.png
                            image2.right.depth.png
                            image2.left.jpg
                            image2.right.jpg
                            ...
                        scene2
                    ...
                mixed
                    scene1
                        _object_settings.json
                        _camera_settings.json
                        image1.left.depth.png
                        image1.right.depth.png
                        image1.left.jpg
                        image1.right.jpg
                        image2.left.depth.png
                        image2.right.depth.png
                        image2.left.jpg
                        image2.right.jpg
                        ...
                    scene2
                    ...

    Args:
        root (string): Root directory where FallingThings is located.
        variant (string): Which variant to use. Either "single", "mixed", or "both".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(self, root: str, variant: str = "single", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "FallingThings"

        verify_str_arg(variant, "variant", valid_values=("single", "mixed", "both"))

        variants = {
            "single": ["single"],
            "mixed": ["mixed"],
            "both": ["single", "mixed"],
        }[variant]

        split_prefix = {
            "single": Path("*") / "*",
            "mixed": Path("*"),
        }

        for s in variants:
            left_img_pattern = str(root / s / split_prefix[s] / "*.left.jpg")
            right_img_pattern = str(root / s / split_prefix[s] / "*.right.jpg")
            self._images += self._scan_pairs(left_img_pattern, right_img_pattern)

            left_disparity_pattern = str(root / s / split_prefix[s] / "*.left.depth.png")
            right_disparity_pattern = str(root / s / split_prefix[s] / "*.right.depth.png")
            self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        # (H, W) image
        depth = np.asarray(Image.open(file_path))
        # as per https://research.nvidia.com/sites/default/files/pubs/2018-06_Falling-Things/readme_0.txt
        # in order to extract disparity from depth maps
        camera_settings_path = Path(file_path).parent / "_camera_settings.json"
        with open(camera_settings_path, "r") as f:
            # inverse of depth-from-disparity equation: depth = (baseline * focal) / (disparity * pixel_constant)
            intrinsics = json.load(f)
            focal = intrinsics["camera_settings"][0]["intrinsic_settings"]["fx"]
            baseline, pixel_constant = 6, 100  # pixel constant is inverted
            disparity_map = (baseline * focal * pixel_constant) / depth.astype(np.float32)
            # unsqueeze disparity to (C, H, W)
            disparity_map = disparity_map[None, :, :]
            valid_mask = None
            return disparity_map, valid_mask
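
    # Worked example of the conversion above (fx value illustrative): with fx = 768.16,
    # baseline = 6 cm and pixel_constant = 100, a depth pixel storing 10000 (i.e. 100 cm)
    # decodes to a disparity of 6 * 768.16 * 100 / 10000 ≈ 46.1 px.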

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))


class SceneFlowStereo(StereoMatchingDataset):
    """Dataset interface for `Scene Flow <https://lmb.informatik.uni-freiburg.de/resources/datasets/SceneFlowDatasets.en.html>`_ datasets.

    This interface provides access to the `FlyingThings3D`, `Monkaa` and `Driving` datasets.

    The dataset is expected to have the following structure: ::

        root
            SceneFlow
                Monkaa
                    frames_cleanpass
                        scene1
                            left
                                img1.png
                                img2.png
                            right
                                img1.png
                                img2.png
                        scene2
                            left
                                img1.png
                                img2.png
                            right
                                img1.png
                                img2.png
                    frames_finalpass
                        scene1
                            left
                                img1.png
                                img2.png
                            right
                                img1.png
                                img2.png
                        ...
                        ...
                    disparity
                        scene1
                            left
                                img1.pfm
                                img2.pfm
                            right
                                img1.pfm
                                img2.pfm
                FlyingThings3D
                    ...
                    ...

    Args:
        root (string): Root directory where SceneFlow is located.
        variant (string): Which dataset variant to use, "FlyingThings3D" (default), "Monkaa" or "Driving".
        pass_name (string): Which pass to use, "clean" (default), "final" or "both".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(
        self,
        root: str,
        variant: str = "FlyingThings3D",
        pass_name: str = "clean",
        transforms: Optional[Callable] = None,
    ) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "SceneFlow"

        verify_str_arg(variant, "variant", valid_values=("FlyingThings3D", "Driving", "Monkaa"))
        verify_str_arg(pass_name, "pass_name", valid_values=("clean", "final", "both"))

        passes = {
            "clean": ["frames_cleanpass"],
            "final": ["frames_finalpass"],
            "both": ["frames_cleanpass", "frames_finalpass"],
        }[pass_name]

        root = root / variant

        prefix_directories = {
            "Monkaa": Path("*"),
            "FlyingThings3D": Path("*") / "*" / "*",
            "Driving": Path("*") / "*" / "*",
        }

        for p in passes:
            left_image_pattern = str(root / p / prefix_directories[variant] / "left" / "*.png")
            right_image_pattern = str(root / p / prefix_directories[variant] / "right" / "*.png")
            self._images += self._scan_pairs(left_image_pattern, right_image_pattern)

            left_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "left" / "*.pfm")
            right_disparity_pattern = str(root / "disparity" / prefix_directories[variant] / "right" / "*.pfm")
            self._disparities += self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))
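

# Hypothetical usage sketch (assumes Monkaa was extracted under "/datasets/SceneFlow");
# ``pass_name="both"`` pairs each disparity map with both the clean and the final render,
# so it yields twice as many samples as a single pass:
#
#     dataset = SceneFlowStereo(root="/datasets", variant="Monkaa", pass_name="both")
#     img_left, img_right, disparity = dataset[0]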


class SintelStereo(StereoMatchingDataset):
    """Sintel `Stereo Dataset <http://sintel.is.tue.mpg.de/stereo>`_.

    The dataset is expected to have the following structure: ::

        root
            Sintel
                training
                    final_left
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...
                    final_right
                        scene2
                            img1.png
                            img2.png
                            ...
                        ...
                    disparities
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...
                    occlusions
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...
                    outofframe
                        scene1
                            img1.png
                            img2.png
                            ...
                        ...

    Args:
        root (string): Root directory where Sintel Stereo is located.
        pass_name (string): The name of the pass to use, either "final", "clean" or "both".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    _has_built_in_disparity_mask = True

    def __init__(self, root: str, pass_name: str = "final", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(pass_name, "pass_name", valid_values=("final", "clean", "both"))

        root = Path(root) / "Sintel"
        pass_names = {
            "final": ["final"],
            "clean": ["clean"],
            "both": ["final", "clean"],
        }[pass_name]

        for p in pass_names:
            left_img_pattern = str(root / "training" / f"{p}_left" / "*" / "*.png")
            right_img_pattern = str(root / "training" / f"{p}_right" / "*" / "*.png")
            self._images += self._scan_pairs(left_img_pattern, right_img_pattern)

            disparity_pattern = str(root / "training" / "disparities" / "*" / "*.png")
            self._disparities += self._scan_pairs(disparity_pattern, None)

    def _get_occlusion_mask_paths(self, file_path: str) -> Tuple[str, str]:
        # helper function to get the occlusion mask paths
        # a path will look like .../.../.../training/disparities/scene1/img1.png
        # we want to get something like .../.../.../training/occlusions/scene1/img1.png
        fpath = Path(file_path)
        basename = fpath.name
        scenedir = fpath.parent
        # the parent of the scenedir is actually the disparity dir
        sampledir = scenedir.parent.parent

        occlusion_path = str(sampledir / "occlusions" / scenedir.name / basename)
        outofframe_path = str(sampledir / "outofframe" / scenedir.name / basename)

        if not os.path.exists(occlusion_path):
            raise FileNotFoundError(f"Occlusion mask {occlusion_path} does not exist")

        if not os.path.exists(outofframe_path):
            raise FileNotFoundError(f"Out of frame mask {outofframe_path} does not exist")

        return occlusion_path, outofframe_path

    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        if file_path is None:
            return None, None

        # disparity decoding as per Sintel instructions in the README provided with the dataset
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        r, g, b = np.split(disparity_map, 3, axis=-1)
        disparity_map = r * 4 + g / (2**6) + b / (2**14)
        # reshape into (C, H, W) format
        disparity_map = np.transpose(disparity_map, (2, 0, 1))
        # find the appropriate file paths
        occluded_mask_path, out_of_frame_mask_path = self._get_occlusion_mask_paths(file_path)
        # occlusion masks
        valid_mask = np.asarray(Image.open(occluded_mask_path)) == 0
        # out of frame masks
        off_mask = np.asarray(Image.open(out_of_frame_mask_path)) == 0
        # combine the masks together
        valid_mask = np.logical_and(off_mask, valid_mask)
        return disparity_map, valid_mask
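
    # Decoding sketch for the R, G, B packing above: a pixel with (R=10, G=16, B=0) decodes to
    # 10 * 4 + 16 / 64 + 0 / 16384 = 40.25 px, i.e. the channels carry progressively finer
    # fractional bits of the disparity.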

    def __getitem__(self, index: int) -> T2:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images, whilst
                the valid_mask is a numpy array of shape (H, W).
        """
        return cast(T2, super().__getitem__(index))


class InStereo2k(StereoMatchingDataset):
    """`InStereo2k <https://github.com/YuhuaXu/StereoDataset>`_ dataset.

    The dataset is expected to have the following structure: ::

        root
            InStereo2k
                train
                    scene1
                        left.png
                        right.png
                        left_disp.png
                        right_disp.png
                        ...
                    scene2
                    ...
                test
                    scene1
                        left.png
                        right.png
                        left_disp.png
                        right_disp.png
                        ...
                    scene2
                    ...

    Args:
        root (string): Root directory where InStereo2k is located.
        split (string): Either "train" or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        root = Path(root) / "InStereo2k" / split

        verify_str_arg(split, "split", valid_values=("train", "test"))

        left_img_pattern = str(root / "*" / "left.png")
        right_img_pattern = str(root / "*" / "right.png")
        self._images = self._scan_pairs(left_img_pattern, right_img_pattern)

        left_disparity_pattern = str(root / "*" / "left_disp.png")
        right_disparity_pattern = str(root / "*" / "right_disp.png")
        self._disparities = self._scan_pairs(left_disparity_pattern, right_disparity_pattern)

    def _read_disparity(self, file_path: str) -> Tuple[np.ndarray, None]:
        disparity_map = np.asarray(Image.open(file_path), dtype=np.float32)
        # unsqueeze disparity to (C, H, W)
        disparity_map = disparity_map[None, :, :] / 1024.0
        valid_mask = None
        return disparity_map, valid_mask

    def __getitem__(self, index: int) -> T1:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 3-tuple with ``(img_left, img_right, disparity)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                If a ``valid_mask`` is generated within the ``transforms`` parameter,
                a 4-tuple with ``(img_left, img_right, disparity, valid_mask)`` is returned.
        """
        return cast(T1, super().__getitem__(index))


class ETH3DStereo(StereoMatchingDataset):
    """ETH3D `Low-Res Two-View <https://www.eth3d.net/datasets>`_ dataset.

    The dataset is expected to have the following structure: ::

        root
            ETH3D
                two_view_training
                    scene1
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    scene2
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    ...
                two_view_training_gt
                    scene1
                        disp0GT.pfm
                        mask0nocc.png
                    scene2
                        disp0GT.pfm
                        mask0nocc.png
                    ...
                two_view_testing
                    scene1
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    scene2
                        im1.png
                        im0.png
                        images.txt
                        cameras.txt
                        calib.txt
                    ...

    Args:
        root (string): Root directory of the ETH3D Dataset.
        split (string, optional): The dataset split of scenes, either "train" (default) or "test".
        transforms (callable, optional): A function/transform that takes in a sample and returns a transformed version.
    """

    _has_built_in_disparity_mask = True

    def __init__(self, root: str, split: str = "train", transforms: Optional[Callable] = None) -> None:
        super().__init__(root, transforms)

        verify_str_arg(split, "split", valid_values=("train", "test"))

        root = Path(root) / "ETH3D"

        img_dir = "two_view_training" if split == "train" else "two_view_testing"
        anot_dir = "two_view_training_gt"

        left_img_pattern = str(root / img_dir / "*" / "im0.png")
        right_img_pattern = str(root / img_dir / "*" / "im1.png")
        self._images = self._scan_pairs(left_img_pattern, right_img_pattern)

        if split == "test":
            self._disparities = list((None, None) for _ in self._images)
        else:
            disparity_pattern = str(root / anot_dir / "*" / "disp0GT.pfm")
            self._disparities = self._scan_pairs(disparity_pattern, None)

    def _read_disparity(self, file_path: str) -> Union[Tuple[None, None], Tuple[np.ndarray, np.ndarray]]:
        # test split has no disparity maps
        if file_path is None:
            return None, None

        disparity_map = _read_pfm_file(file_path)
        disparity_map = np.abs(disparity_map)  # ensure that the disparity is positive
        mask_path = Path(file_path).parent / "mask0nocc.png"
        valid_mask = Image.open(mask_path)
        valid_mask = np.asarray(valid_mask).astype(bool)
        return disparity_map, valid_mask
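
    # The ETH3D ground truth ships a non-occlusion mask per scene (mask0nocc.png); pixels that
    # are non-zero there are treated as valid, which is what the boolean cast above produces.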

    def __getitem__(self, index: int) -> T2:
        """Return example at given index.

        Args:
            index(int): The index of the example to retrieve

        Returns:
            tuple: A 4-tuple with ``(img_left, img_right, disparity, valid_mask)``.
                The disparity is a numpy array of shape (1, H, W) and the images are PIL images.
                ``valid_mask`` is implicitly ``None`` if the ``transforms`` parameter does not
                generate a valid mask.
                Both ``disparity`` and ``valid_mask`` are ``None`` if the dataset split is test.
        """
        return cast(T2, super().__getitem__(index))
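

# Optional smoke-test sketch (not part of the library API): point the root below at a directory
# that actually contains one of the layouts documented above before running this module directly.
if __name__ == "__main__":
    _SMOKE_TEST_ROOT = "/datasets"  # hypothetical path, adjust to your local setup
    dataset = CarlaStereo(root=_SMOKE_TEST_ROOT)
    left, right, disparity = dataset[0]
    print(f"{len(dataset)} samples; first disparity map has shape {disparity.shape}")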