hmdb51.py

import glob
import os
from typing import Any, Callable, Dict, List, Optional, Tuple

from torch import Tensor

from .folder import find_classes, make_dataset
from .video_utils import VideoClips
from .vision import VisionDataset


class HMDB51(VisionDataset):
    """
    `HMDB51 <https://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_
    dataset.

    HMDB51 is an action recognition video dataset.
    This dataset considers every video as a collection of video clips of fixed size, specified
    by ``frames_per_clip``, where the step in frames between each clip is given by
    ``step_between_clips``.

    To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5``
    and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two
    elements will come from video 1, and the next three elements from video 2.
    Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all
    frames in a video might be present.

    Internally, it uses a VideoClips object to handle clip creation.

    Args:
        root (str): Root directory of the HMDB51 Dataset.
        annotation_path (str): Path to the folder containing the split files.
        frames_per_clip (int): Number of frames in a clip.
        step_between_clips (int): Number of frames between each clip.
        fold (int, optional): Which fold to use. Should be between 1 and 3.
        train (bool, optional): If ``True``, creates a dataset from the train split,
            otherwise from the ``test`` split.
        transform (callable, optional): A function/transform that takes in a TxHxWxC video
            and returns a transformed version.
        output_format (str, optional): The format of the output video tensors (before transforms).
            Can be either "THWC" (default) or "TCHW".

    Returns:
        tuple: A 3-tuple with the following entries:

            - video (Tensor[T, H, W, C] or Tensor[T, C, H, W]): the `T` video frames
            - audio (Tensor[K, L]): the audio frames, where `K` is the number of channels
              and `L` is the number of points
            - label (int): class of the video clip
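
    Example:
        A minimal usage sketch; the two paths are placeholders for wherever the
        extracted videos and the official split files live::

            dataset = HMDB51(
                root="path/to/hmdb51/videos",
                annotation_path="path/to/test_train_splits",
                frames_per_clip=16,
                fold=1,
                train=True,
            )
            video, audio, label = dataset[0]
            # video has shape [T, H, W, C] with the default output_format="THWC"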
    """

    data_url = "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar"
    splits = {
        "url": "https://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar",
        "md5": "15e67781e70dcfbdce2d7dbb9b3344b5",
    }
    TRAIN_TAG = 1
    TEST_TAG = 2

    def __init__(
        self,
        root: str,
        annotation_path: str,
        frames_per_clip: int,
        step_between_clips: int = 1,
        frame_rate: Optional[int] = None,
        fold: int = 1,
        train: bool = True,
        transform: Optional[Callable] = None,
        _precomputed_metadata: Optional[Dict[str, Any]] = None,
        num_workers: int = 1,
        _video_width: int = 0,
        _video_height: int = 0,
        _video_min_dimension: int = 0,
        _audio_samples: int = 0,
        output_format: str = "THWC",
    ) -> None:
        super().__init__(root)
        if fold not in (1, 2, 3):
            raise ValueError(f"fold should be between 1 and 3, got {fold}")

        extensions = ("avi",)
        self.classes, class_to_idx = find_classes(self.root)
        self.samples = make_dataset(
            self.root,
            class_to_idx,
            extensions,
        )

        video_paths = [path for (path, _) in self.samples]
        video_clips = VideoClips(
            video_paths,
            frames_per_clip,
            step_between_clips,
            frame_rate,
            _precomputed_metadata,
            num_workers=num_workers,
            _video_width=_video_width,
            _video_height=_video_height,
            _video_min_dimension=_video_min_dimension,
            _audio_samples=_audio_samples,
            output_format=output_format,
        )
        # we bookkeep the full version of video clips because we want to be able
        # to return the metadata of the full version rather than the subset version
        # of video clips
        self.full_video_clips = video_clips
        self.fold = fold
        self.train = train
        self.indices = self._select_fold(video_paths, annotation_path, fold, train)
        self.video_clips = video_clips.subset(self.indices)
        self.transform = transform

    @property
    def metadata(self) -> Dict[str, Any]:
        return self.full_video_clips.metadata

    def _select_fold(self, video_list: List[str], annotations_dir: str, fold: int, train: bool) -> List[int]:
        target_tag = self.TRAIN_TAG if train else self.TEST_TAG
        split_pattern_name = f"*test_split{fold}.txt"
        split_pattern_path = os.path.join(annotations_dir, split_pattern_name)
        annotation_paths = glob.glob(split_pattern_path)
        selected_files = set()
        for filepath in annotation_paths:
            with open(filepath) as fid:
                lines = fid.readlines()

            # Each line of a split file holds a video file name followed by an
            # integer tag; only videos whose tag matches the requested split
            # (TRAIN_TAG or TEST_TAG) are kept.
            for line in lines:
                video_filename, tag_string = line.split()
                tag = int(tag_string)
                if tag == target_tag:
                    selected_files.add(video_filename)

        # Map the selected file names back to indices into the full video list.
        indices = []
        for video_index, video_path in enumerate(video_list):
            if os.path.basename(video_path) in selected_files:
                indices.append(video_index)

        return indices

    def __len__(self) -> int:
        return self.video_clips.num_clips()

    def __getitem__(self, idx: int) -> Tuple[Tensor, Tensor, int]:
        video, audio, _, video_idx = self.video_clips.get_clip(idx)
        sample_index = self.indices[video_idx]
        _, class_index = self.samples[sample_index]

        if self.transform is not None:
            video = self.transform(video)

        return video, audio, class_index
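

# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module). It assumes the
# HMDB51 videos (hmdb51_org.rar) and the official split files
# (test_train_splits.rar) have already been downloaded and extracted; the two
# paths below are placeholders. Because of the relative imports at the top of
# the file, this only runs in the package context, e.g.
# ``python -m torchvision.datasets.hmdb51``; in application code the class is
# normally imported from ``torchvision.datasets`` instead.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    train_set = HMDB51(
        root="data/hmdb51/videos",             # placeholder: extracted hmdb51_org.rar
        annotation_path="data/hmdb51/splits",  # placeholder: extracted test_train_splits.rar
        frames_per_clip=16,
        step_between_clips=16,
        fold=1,
        train=True,
    )
    test_set = HMDB51(
        root="data/hmdb51/videos",
        annotation_path="data/hmdb51/splits",
        frames_per_clip=16,
        step_between_clips=16,
        fold=1,
        train=False,
    )
    print(f"fold 1: {len(train_set)} train clips, {len(test_set)} test clips")

    video, audio, label = train_set[0]
    print(video.shape, train_set.classes[label])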