cifar.py

import os.path
import pickle
from typing import Any, Callable, Optional, Tuple

import numpy as np
from PIL import Image

from .utils import check_integrity, download_and_extract_archive
from .vision import VisionDataset


class CIFAR10(VisionDataset):
    """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    Args:
        root (string): Root directory of dataset where directory
            ``cifar-10-batches-py`` exists or will be saved to if download is set to True.
        train (bool, optional): If True, creates dataset from training set, otherwise
            creates from test set.
        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.RandomCrop``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in root directory. If dataset is already downloaded, it is not
            downloaded again.
    """

    base_folder = "cifar-10-batches-py"
    url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
    filename = "cifar-10-python.tar.gz"
    tgz_md5 = "c58f30108f718f92721af3b95e74349a"
    train_list = [
        ["data_batch_1", "c99cafc152244af753f735de768cd75f"],
        ["data_batch_2", "d4bba439e000b95fd0a9bffe97cbabec"],
        ["data_batch_3", "54ebc095f3ab1f0389bbae665268c751"],
        ["data_batch_4", "634d18415352ddfa80567beed471001a"],
        ["data_batch_5", "482c414d41f54cd18b22e5b47cb7c3cb"],
    ]

    test_list = [
        ["test_batch", "40351d587109b95175f43aff81a1287e"],
    ]
    meta = {
        "filename": "batches.meta",
        "key": "label_names",
        "md5": "5ff9c542aee3614f3951f8cda6e48888",
    }

    def __init__(
        self,
        root: str,
        train: bool = True,
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:

        super().__init__(root, transform=transform, target_transform=target_transform)

        self.train = train  # training set or test set

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        if self.train:
            downloaded_list = self.train_list
        else:
            downloaded_list = self.test_list

        self.data: Any = []
        self.targets = []

        # now load the pickled numpy arrays
        for file_name, checksum in downloaded_list:
            file_path = os.path.join(self.root, self.base_folder, file_name)
            with open(file_path, "rb") as f:
                entry = pickle.load(f, encoding="latin1")
                self.data.append(entry["data"])
                if "labels" in entry:
                    self.targets.extend(entry["labels"])
                else:
                    # CIFAR-100 batches store their targets under "fine_labels"
                    self.targets.extend(entry["fine_labels"])

        self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
        self.data = self.data.transpose((0, 2, 3, 1))  # convert to HWC

        self._load_meta()

    def _load_meta(self) -> None:
        path = os.path.join(self.root, self.base_folder, self.meta["filename"])
        if not check_integrity(path, self.meta["md5"]):
            raise RuntimeError("Dataset metadata file not found or corrupted. You can use download=True to download it")
        with open(path, "rb") as infile:
            data = pickle.load(infile, encoding="latin1")
            self.classes = data[self.meta["key"]]
        self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        img, target = self.data[index], self.targets[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target

    def __len__(self) -> int:
        return len(self.data)

    def _check_integrity(self) -> bool:
        for filename, md5 in self.train_list + self.test_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            if not check_integrity(fpath, md5):
                return False
        return True

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return
        download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5)

    def extra_repr(self) -> str:
        split = "Train" if self.train is True else "Test"
        return f"Split: {split}"


class CIFAR100(CIFAR10):
    """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.

    This is a subclass of the `CIFAR10` Dataset.
    """

    base_folder = "cifar-100-python"
    url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz"
    filename = "cifar-100-python.tar.gz"
    tgz_md5 = "eb9058c3a382ffc7106e4002c42a8d85"
    train_list = [
        ["train", "16019d7e3df5f24257cddd939b257f8d"],
    ]

    test_list = [
        ["test", "f0ef6b0ae62326f3e7ffdfab6717acfc"],
    ]
    meta = {
        "filename": "meta",
        "key": "fine_label_names",
        "md5": "7973b15100ade9c7d40fb424638fde48",
    }
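

# CIFAR100 is used the same way; only the archive, checksums, and label key
# differ. A sketch under the same torchvision assumption as above:
#
#     from torchvision.datasets import CIFAR100
#
#     dataset = CIFAR100(root="./data", train=True, download=True)
#     img, target = dataset[0]  # target indexes one of the 100 fine-grained classes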