# kinetics.py
  1. import csv
  2. import os
  3. import time
  4. import urllib
  5. from functools import partial
  6. from multiprocessing import Pool
  7. from os import path
  8. from typing import Any, Callable, Dict, Optional, Tuple
  9. from torch import Tensor
  10. from .folder import find_classes, make_dataset
  11. from .utils import check_integrity, download_and_extract_archive, download_url, verify_str_arg
  12. from .video_utils import VideoClips
  13. from .vision import VisionDataset
def _dl_wrap(tarpath: str, videopath: str, line: str) -> None:
    # Module-level wrapper (rather than a lambda/closure) so it can be pickled
    # and dispatched through multiprocessing.Pool by Kinetics._download_videos.
    # Downloads the archive at URL `line` into `tarpath` and extracts it into
    # `videopath`.
    download_and_extract_archive(line, tarpath, videopath)
  16. class Kinetics(VisionDataset):
  17. """`Generic Kinetics <https://www.deepmind.com/open-source/kinetics>`_
  18. dataset.
  19. Kinetics-400/600/700 are action recognition video datasets.
  20. This dataset consider every video as a collection of video clips of fixed size, specified
  21. by ``frames_per_clip``, where the step in frames between each clip is given by
  22. ``step_between_clips``.
  23. To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
  24. and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
  25. elements will come from video 1, and the next three elements from video 2.
  26. Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
  27. frames in a video might be present.
  28. Args:
  29. root (string): Root directory of the Kinetics Dataset.
  30. Directory should be structured as follows:
  31. .. code::
  32. root/
  33. ├── split
  34. │ ├── class1
  35. │ │ ├── clip1.mp4
  36. │ │ ├── clip2.mp4
  37. │ │ ├── clip3.mp4
  38. │ │ ├── ...
  39. │ ├── class2
  40. │ │ ├── clipx.mp4
  41. │ │ └── ...
  42. Note: split is appended automatically using the split argument.
  43. frames_per_clip (int): number of frames in a clip
  44. num_classes (int): select between Kinetics-400 (default), Kinetics-600, and Kinetics-700
  45. split (str): split of the dataset to consider; supports ``"train"`` (default) ``"val"`` ``"test"``
  46. frame_rate (float): If omitted, interpolate different frame rate for each clip.
  47. step_between_clips (int): number of frames between each clip
  48. transform (callable, optional): A function/transform that takes in a TxHxWxC video
  49. and returns a transformed version.
  50. download (bool): Download the official version of the dataset to root folder.
  51. num_workers (int): Use multiple workers for VideoClips creation
  52. num_download_workers (int): Use multiprocessing in order to speed up download.
  53. output_format (str, optional): The format of the output video tensors (before transforms).
  54. Can be either "THWC" or "TCHW" (default).
  55. Note that in most other utils and datasets, the default is actually "THWC".
  56. Returns:
  57. tuple: A 3-tuple with the following entries:
  58. - video (Tensor[T, C, H, W] or Tensor[T, H, W, C]): the `T` video frames in torch.uint8 tensor
  59. - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels
  60. and `L` is the number of points in torch.float tensor
  61. - label (int): class of the video clip
  62. Raises:
  63. RuntimeError: If ``download is True`` and the video archives are already extracted.
  64. """
  65. _TAR_URLS = {
  66. "400": "https://s3.amazonaws.com/kinetics/400/{split}/k400_{split}_path.txt",
  67. "600": "https://s3.amazonaws.com/kinetics/600/{split}/k600_{split}_path.txt",
  68. "700": "https://s3.amazonaws.com/kinetics/700_2020/{split}/k700_2020_{split}_path.txt",
  69. }
  70. _ANNOTATION_URLS = {
  71. "400": "https://s3.amazonaws.com/kinetics/400/annotations/{split}.csv",
  72. "600": "https://s3.amazonaws.com/kinetics/600/annotations/{split}.csv",
  73. "700": "https://s3.amazonaws.com/kinetics/700_2020/annotations/{split}.csv",
  74. }
  75. def __init__(
  76. self,
  77. root: str,
  78. frames_per_clip: int,
  79. num_classes: str = "400",
  80. split: str = "train",
  81. frame_rate: Optional[int] = None,
  82. step_between_clips: int = 1,
  83. transform: Optional[Callable] = None,
  84. extensions: Tuple[str, ...] = ("avi", "mp4"),
  85. download: bool = False,
  86. num_download_workers: int = 1,
  87. num_workers: int = 1,
  88. _precomputed_metadata: Optional[Dict[str, Any]] = None,
  89. _video_width: int = 0,
  90. _video_height: int = 0,
  91. _video_min_dimension: int = 0,
  92. _audio_samples: int = 0,
  93. _audio_channels: int = 0,
  94. _legacy: bool = False,
  95. output_format: str = "TCHW",
  96. ) -> None:
  97. # TODO: support test
  98. self.num_classes = verify_str_arg(num_classes, arg="num_classes", valid_values=["400", "600", "700"])
  99. self.extensions = extensions
  100. self.num_download_workers = num_download_workers
  101. self.root = root
  102. self._legacy = _legacy
  103. if _legacy:
  104. print("Using legacy structure")
  105. self.split_folder = root
  106. self.split = "unknown"
  107. output_format = "THWC"
  108. if download:
  109. raise ValueError("Cannot download the videos using legacy_structure.")
  110. else:
  111. self.split_folder = path.join(root, split)
  112. self.split = verify_str_arg(split, arg="split", valid_values=["train", "val", "test"])
  113. if download:
  114. self.download_and_process_videos()
  115. super().__init__(self.root)
  116. self.classes, class_to_idx = find_classes(self.split_folder)
  117. self.samples = make_dataset(self.split_folder, class_to_idx, extensions, is_valid_file=None)
  118. video_list = [x[0] for x in self.samples]
  119. self.video_clips = VideoClips(
  120. video_list,
  121. frames_per_clip,
  122. step_between_clips,
  123. frame_rate,
  124. _precomputed_metadata,
  125. num_workers=num_workers,
  126. _video_width=_video_width,
  127. _video_height=_video_height,
  128. _video_min_dimension=_video_min_dimension,
  129. _audio_samples=_audio_samples,
  130. _audio_channels=_audio_channels,
  131. output_format=output_format,
  132. )
  133. self.transform = transform
  134. def download_and_process_videos(self) -> None:
  135. """Downloads all the videos to the _root_ folder in the expected format."""
  136. tic = time.time()
  137. self._download_videos()
  138. toc = time.time()
  139. print("Elapsed time for downloading in mins ", (toc - tic) / 60)
  140. self._make_ds_structure()
  141. toc2 = time.time()
  142. print("Elapsed time for processing in mins ", (toc2 - toc) / 60)
  143. print("Elapsed time overall in mins ", (toc2 - tic) / 60)
  144. def _download_videos(self) -> None:
  145. """download tarballs containing the video to "tars" folder and extract them into the _split_ folder where
  146. split is one of the official dataset splits.
  147. Raises:
  148. RuntimeError: if download folder exists, break to prevent downloading entire dataset again.
  149. """
  150. if path.exists(self.split_folder):
  151. raise RuntimeError(
  152. f"The directory {self.split_folder} already exists. "
  153. f"If you want to re-download or re-extract the images, delete the directory."
  154. )
  155. tar_path = path.join(self.root, "tars")
  156. file_list_path = path.join(self.root, "files")
  157. split_url = self._TAR_URLS[self.num_classes].format(split=self.split)
  158. split_url_filepath = path.join(file_list_path, path.basename(split_url))
  159. if not check_integrity(split_url_filepath):
  160. download_url(split_url, file_list_path)
  161. with open(split_url_filepath) as file:
  162. list_video_urls = [urllib.parse.quote(line, safe="/,:") for line in file.read().splitlines()]
  163. if self.num_download_workers == 1:
  164. for line in list_video_urls:
  165. download_and_extract_archive(line, tar_path, self.split_folder)
  166. else:
  167. part = partial(_dl_wrap, tar_path, self.split_folder)
  168. poolproc = Pool(self.num_download_workers)
  169. poolproc.map(part, list_video_urls)
  170. def _make_ds_structure(self) -> None:
  171. """move videos from
  172. split_folder/
  173. ├── clip1.avi
  174. ├── clip2.avi
  175. to the correct format as described below:
  176. split_folder/
  177. ├── class1
  178. │ ├── clip1.avi
  179. """
  180. annotation_path = path.join(self.root, "annotations")
  181. if not check_integrity(path.join(annotation_path, f"{self.split}.csv")):
  182. download_url(self._ANNOTATION_URLS[self.num_classes].format(split=self.split), annotation_path)
  183. annotations = path.join(annotation_path, f"{self.split}.csv")
  184. file_fmtstr = "{ytid}_{start:06}_{end:06}.mp4"
  185. with open(annotations) as csvfile:
  186. reader = csv.DictReader(csvfile)
  187. for row in reader:
  188. f = file_fmtstr.format(
  189. ytid=row["youtube_id"],
  190. start=int(row["time_start"]),
  191. end=int(row["time_end"]),
  192. )
  193. label = row["label"].replace(" ", "_").replace("'", "").replace("(", "").replace(")", "")
  194. os.makedirs(path.join(self.split_folder, label), exist_ok=True)
  195. downloaded_file = path.join(self.split_folder, f)
  196. if path.isfile(downloaded_file):
  197. os.replace(
  198. downloaded_file,
  199. path.join(self.split_folder, label, f),
  200. )
  201. @property
  202. def metadata(self) -> Dict[str, Any]:
  203. return self.video_clips.metadata
  204. def __len__(self) -> int:
  205. return self.video_clips.num_clips()
  206. def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
  207. video, audio, info, video_idx = self.video_clips.get_clip(idx)
  208. label = self.samples[video_idx][1]
  209. if self.transform is not None:
  210. video = self.transform(video)
  211. return video, audio, label