celeba.py

import csv
import os
from collections import namedtuple
from typing import Any, Callable, List, Optional, Tuple, Union

import PIL
import torch

from .utils import check_integrity, download_file_from_google_drive, extract_archive, verify_str_arg
from .vision import VisionDataset

CSV = namedtuple("CSV", ["header", "index", "data"])


class CelebA(VisionDataset):
    """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_.

    Args:
        root (string): Root directory where images are downloaded to.
        split (string): One of {'train', 'valid', 'test', 'all'}.
            Accordingly, the dataset split is selected.
        target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``,
            or ``landmarks``. Can also be a list to output a tuple with all specified target types.
            The targets represent:

                - ``attr`` (Tensor shape=(40,) dtype=int): binary (0, 1) labels for attributes
                - ``identity`` (int): label for each person (data points with the same identity are the same person)
                - ``bbox`` (Tensor shape=(4,) dtype=int): bounding box (x, y, width, height)
                - ``landmarks`` (Tensor shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x,
                  righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y)

            Defaults to ``attr``. If empty, ``None`` will be returned as target.

        transform (callable, optional): A function/transform that takes in a PIL image
            and returns a transformed version. E.g., ``transforms.PILToTensor``
        target_transform (callable, optional): A function/transform that takes in the
            target and transforms it.
        download (bool, optional): If true, downloads the dataset from the internet and
            puts it in the root directory. If the dataset is already downloaded, it is not
            downloaded again.

    .. warning::

        To download the dataset, `gdown <https://github.com/wkentaro/gdown>`_ is required.
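
    Example (an illustrative sketch only; the ``root`` path and the chosen transform
    below are placeholder assumptions, not requirements of this class)::

        from torchvision import transforms
        from torchvision.datasets import CelebA

        celeba = CelebA(
            root="data",
            split="train",
            target_type=["attr", "identity"],
            transform=transforms.PILToTensor(),
            download=True,
        )
        # With two target types, the target is a tuple in the requested order.
        image, (attr, identity) = celeba[0]  # attr: (40,) tensor of 0/1, identity: person id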
  34. """

    base_folder = "celeba"
    # There currently does not appear to be an easy way to extract 7z in python (without introducing additional
    # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available
    # right now.
    file_list = [
        # File ID                                      MD5 Hash                            Filename
        ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"),
        # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"),
        # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"),
        ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"),
        ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"),
        ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"),
        ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"),
        # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"),
        ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"),
    ]

    def __init__(
        self,
        root: str,
        split: str = "train",
        target_type: Union[List[str], str] = "attr",
        transform: Optional[Callable] = None,
        target_transform: Optional[Callable] = None,
        download: bool = False,
    ) -> None:
        super().__init__(root, transform=transform, target_transform=target_transform)
        self.split = split
        if isinstance(target_type, list):
            self.target_type = target_type
        else:
            self.target_type = [target_type]

        if not self.target_type and self.target_transform is not None:
            raise RuntimeError("target_transform is specified but target_type is empty")

        if download:
            self.download()

        if not self._check_integrity():
            raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it")

        split_map = {
            "train": 0,
            "valid": 1,
            "test": 2,
            "all": None,
        }
        split_ = split_map[verify_str_arg(split.lower(), "split", ("train", "valid", "test", "all"))]
        splits = self._load_csv("list_eval_partition.txt")
        identity = self._load_csv("identity_CelebA.txt")
        bbox = self._load_csv("list_bbox_celeba.txt", header=1)
        landmarks_align = self._load_csv("list_landmarks_align_celeba.txt", header=1)
        attr = self._load_csv("list_attr_celeba.txt", header=1)

        # Select the rows belonging to the requested split; "all" keeps every row.
        mask = slice(None) if split_ is None else (splits.data == split_).squeeze()

        if mask == slice(None):  # if split == "all"
            self.filename = splits.index
        else:
            self.filename = [splits.index[i] for i in torch.squeeze(torch.nonzero(mask))]
        self.identity = identity.data[mask]
        self.bbox = bbox.data[mask]
        self.landmarks_align = landmarks_align.data[mask]
        self.attr = attr.data[mask]
        # map from {-1, 1} to {0, 1}
        self.attr = torch.div(self.attr + 1, 2, rounding_mode="floor")
        self.attr_names = attr.header

    def _load_csv(
        self,
        filename: str,
        header: Optional[int] = None,
    ) -> CSV:
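        """Parse a whitespace-separated annotation file into a ``CSV`` namedtuple.

        The first column of every row is kept as the filename index; the remaining
        columns are converted to a single integer tensor. If ``header`` is given, the
        row at that position is used as the header and only the rows after it are parsed.
        """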
        with open(os.path.join(self.root, self.base_folder, filename)) as csv_file:
            data = list(csv.reader(csv_file, delimiter=" ", skipinitialspace=True))

        if header is not None:
            headers = data[header]
            data = data[header + 1 :]
        else:
            headers = []

        indices = [row[0] for row in data]
        data = [row[1:] for row in data]
        data_int = [list(map(int, i)) for i in data]

        return CSV(headers, indices, torch.tensor(data_int))

    def _check_integrity(self) -> bool:
        for (_, md5, filename) in self.file_list:
            fpath = os.path.join(self.root, self.base_folder, filename)
            _, ext = os.path.splitext(filename)
            # Allow original archive to be deleted (zip and 7z)
            # Only need the extracted images
            if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5):
                return False

        # Should check a hash of the images
        return os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba"))

    def download(self) -> None:
        if self._check_integrity():
            print("Files already downloaded and verified")
            return

        for (file_id, md5, filename) in self.file_list:
            download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5)

        extract_archive(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"))

    def __getitem__(self, index: int) -> Tuple[Any, Any]:
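        """Return the image at ``index`` together with the requested target(s).

        The target is a single value for one ``target_type``, a tuple for several,
        and ``None`` when ``target_type`` is empty.
        """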
        X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index]))

        target: Any = []
        for t in self.target_type:
            if t == "attr":
                target.append(self.attr[index, :])
            elif t == "identity":
                target.append(self.identity[index, 0])
            elif t == "bbox":
                target.append(self.bbox[index, :])
            elif t == "landmarks":
                target.append(self.landmarks_align[index, :])
            else:
                # TODO: refactor with utils.verify_str_arg
                raise ValueError(f'Target type "{t}" is not recognized.')

        if self.transform is not None:
            X = self.transform(X)

        if target:
            target = tuple(target) if len(target) > 1 else target[0]

            if self.target_transform is not None:
                target = self.target_transform(target)
        else:
            target = None

        return X, target

    def __len__(self) -> int:
        return len(self.attr)

    def extra_repr(self) -> str:
        lines = ["Target type: {target_type}", "Split: {split}"]
        return "\n".join(lines).format(**self.__dict__)