phototour.py

import os
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch
from PIL import Image

from .utils import download_url
from .vision import VisionDataset


class PhotoTour(VisionDataset):
  9. """`Multi-view Stereo Correspondence <http://matthewalunbrown.com/patchdata/patchdata.html>`_ Dataset.
  10. .. note::
  11. We only provide the newer version of the dataset, since the authors state that it
  12. is more suitable for training descriptors based on difference of Gaussian, or Harris corners, as the
  13. patches are centred on real interest point detections, rather than being projections of 3D points as is the
  14. case in the old dataset.
  15. The original dataset is available under http://phototour.cs.washington.edu/patches/default.htm.
  16. Args:
  17. root (string): Root directory where images are.
  18. name (string): Name of the dataset to load.
  19. transform (callable, optional): A function/transform that takes in an PIL image
  20. and returns a transformed version.
  21. download (bool, optional): If true, downloads the dataset from the internet and
  22. puts it in root directory. If dataset is already downloaded, it is not
  23. downloaded again.
  24. """
    urls = {
        "notredame_harris": [
            "http://matthewalunbrown.com/patchdata/notredame_harris.zip",
            "notredame_harris.zip",
            "69f8c90f78e171349abdf0307afefe4d",
        ],
        "yosemite_harris": [
            "http://matthewalunbrown.com/patchdata/yosemite_harris.zip",
            "yosemite_harris.zip",
            "a73253d1c6fbd3ba2613c45065c00d46",
        ],
        "liberty_harris": [
            "http://matthewalunbrown.com/patchdata/liberty_harris.zip",
            "liberty_harris.zip",
            "c731fcfb3abb4091110d0ae8c7ba182c",
        ],
        "notredame": [
            "http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip",
            "notredame.zip",
            "509eda8535847b8c0a90bbb210c83484",
        ],
        "yosemite": ["http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip", "yosemite.zip", "533b2e8eb7ede31be40abc317b2fd4f0"],
        "liberty": ["http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip", "liberty.zip", "fdd9152f138ea5ef2091746689176414"],
    }
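    # Per-dataset grayscale statistics (on a 0-1 scale) and total patch counts.
    # ``mean`` and ``std`` are copied onto each instance in ``__init__`` so that
    # callers can build a matching normalization transform if they want one.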
    means = {
        "notredame": 0.4854,
        "yosemite": 0.4844,
        "liberty": 0.4437,
        "notredame_harris": 0.4854,
        "yosemite_harris": 0.4844,
        "liberty_harris": 0.4437,
    }
    stds = {
        "notredame": 0.1864,
        "yosemite": 0.1818,
        "liberty": 0.2019,
        "notredame_harris": 0.1864,
        "yosemite_harris": 0.1818,
        "liberty_harris": 0.2019,
    }
    lens = {
        "notredame": 468159,
        "yosemite": 633587,
        "liberty": 450092,
        "liberty_harris": 379587,
        "yosemite_harris": 450912,
        "notredame_harris": 325295,
    }
    image_ext = "bmp"
    info_file = "info.txt"
    matches_files = "m50_100000_100000_0.txt"

    def __init__(
        self, root: str, name: str, train: bool = True, transform: Optional[Callable] = None, download: bool = False
    ) -> None:
        super().__init__(root, transform=transform)
        self.name = name
        self.data_dir = os.path.join(self.root, name)
        self.data_down = os.path.join(self.root, f"{name}.zip")
        self.data_file = os.path.join(self.root, f"{name}.pt")

        self.train = train
        self.mean = self.means[name]
        self.std = self.stds[name]

        if download:
            self.download()

        if not self._check_datafile_exists():
            self.cache()

        # load the serialized data: N x 64 x 64 uint8 patches, the 3D point ID
        # of each patch, and the (index1, index2, is_match) ground-truth triplets
        self.data, self.labels, self.matches = torch.load(self.data_file)

    def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]:
        """
        Args:
            index (int): Index

        Returns:
            The patch tensor if ``self.train`` is True, otherwise the tuple
            ``(data1, data2, matches)`` for the ``index``-th ground-truth pair.
        """
        if self.train:
            data = self.data[index]
            if self.transform is not None:
                data = self.transform(data)
            return data
        m = self.matches[index]
        data1, data2 = self.data[m[0]], self.data[m[1]]
        if self.transform is not None:
            data1 = self.transform(data1)
            data2 = self.transform(data2)
        return data1, data2, m[2]

    def __len__(self) -> int:
        return len(self.data if self.train else self.matches)

    def _check_datafile_exists(self) -> bool:
        return os.path.exists(self.data_file)

    def _check_downloaded(self) -> bool:
        return os.path.exists(self.data_dir)

    def download(self) -> None:
        if self._check_datafile_exists():
            print(f"# Found cached data {self.data_file}")
            return

        if not self._check_downloaded():
            # download files
            url = self.urls[self.name][0]
            filename = self.urls[self.name][1]
            md5 = self.urls[self.name][2]
            fpath = os.path.join(self.root, filename)

            download_url(url, self.root, filename, md5)

            print(f"# Extracting data {self.data_down}\n")

            import zipfile

            with zipfile.ZipFile(fpath, "r") as z:
                z.extractall(self.data_dir)

            os.unlink(fpath)

    def cache(self) -> None:
        # process and save as torch files
        print(f"# Caching data {self.data_file}")

        dataset = (
            read_image_file(self.data_dir, self.image_ext, self.lens[self.name]),
            read_info_file(self.data_dir, self.info_file),
            read_matches_files(self.data_dir, self.matches_files),
        )

        with open(self.data_file, "wb") as f:
            torch.save(dataset, f)

    def extra_repr(self) -> str:
        split = "Train" if self.train is True else "Test"
        return f"Split: {split}"

def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor:
    """Return a Tensor containing the patches"""

    def PIL2array(_img: Image.Image) -> np.ndarray:
        """Convert PIL image type to numpy 2D array"""
        return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64)

    def find_files(_data_dir: str, _image_ext: str) -> List[str]:
        """Return a list with the file names of the images containing the patches"""
        files = []
        # find those files with the specified extension
        for file_dir in os.listdir(_data_dir):
            if file_dir.endswith(_image_ext):
                files.append(os.path.join(_data_dir, file_dir))
        return sorted(files)  # sort files in ascending order to keep relations

    patches = []
    list_files = find_files(data_dir, image_ext)

    for fpath in list_files:
        img = Image.open(fpath)
        for y in range(0, img.height, 64):
            for x in range(0, img.width, 64):
                patch = img.crop((x, y, x + 64, y + 64))
                patches.append(PIL2array(patch))
    return torch.ByteTensor(np.array(patches[:n]))

def read_info_file(data_dir: str, info_file: str) -> torch.Tensor:
    """Return a Tensor containing the list of labels.

    Read the file and keep only the ID of the 3D point.
    """
    with open(os.path.join(data_dir, info_file)) as f:
        labels = [int(line.split()[0]) for line in f]
    return torch.LongTensor(labels)

def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor:
    """Return a Tensor containing the ground-truth matches.

    Keep only the indices of the two patches and whether their 3D point IDs agree:
    matches are represented with a 1, non-matches with a 0.
    """
    matches = []
    with open(os.path.join(data_dir, matches_file)) as f:
        for line in f:
            line_split = line.split()
            matches.append([int(line_split[0]), int(line_split[3]), int(line_split[1] == line_split[4])])
    return torch.LongTensor(matches)
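

# A minimal usage sketch (not part of the original module). It assumes this file
# lives in ``torchvision.datasets`` as usual and that ``./data`` is writable; the
# lambda transform is only one reasonable way to turn the raw uint8 64x64 patch
# tensors into normalized float inputs.
#
#     from torchvision import datasets, transforms
#
#     name = "notredame"
#     to_float = transforms.Lambda(lambda patch: patch.float().unsqueeze(0) / 255.0)
#     normalize = transforms.Normalize(
#         (datasets.PhotoTour.means[name],), (datasets.PhotoTour.stds[name],)
#     )
#
#     train_set = datasets.PhotoTour(
#         root="./data", name=name, train=True, download=True,
#         transform=transforms.Compose([to_float, normalize]),
#     )
#     patch = train_set[0]                    # single 1x64x64 float tensor
#
#     test_set = datasets.PhotoTour(
#         root="./data", name=name, train=False,
#         transform=transforms.Compose([to_float, normalize]),
#     )
#     patch1, patch2, is_match = test_set[0]  # ground-truth pair and its label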