import datasets import os import tarfile import json import urllib.request from PIL import Image import io class FaceObfuscatedImagenet(datasets.GeneratorBasedBuilder): """Comprehensive ImageNet dataset collection with face obfuscation support. This dataset includes multiple variants such as the full ILSVRC2012 ImageNet (1k), ImageNet-100, Imagenette (10), and ImageNet-Mini subsets, alongside their corresponding face-obfuscated (blurred) versions. This dataset supports privacy-preserving research by providing both original and face-obfuscated images for each subset. The face-obfuscated versions are designed to protect privacy while maintaining the utility of the dataset for training and evaluation purposes. """ # This could be a more descriptive name, but we keep it simple for now VERSION = datasets.Version("1.0.0") # URLs for automatic download _INET_TRAIN_URL = ( "https://www.image-net.org/data/ILSVRC/2012/ILSVRC2012_img_train.tar" ) _INET_VAL_URL = "https://www.image-net.org/data/ILSVRC/2012/ILSVRC2012_img_val.tar" _INET_DEVKIT_URL = ( "https://www.image-net.org/data/ILSVRC/2012/ILSVRC2012_devkit_t12.tar.gz" ) _NOFACE_TRAIN_URL = "https://image-net.org/data/ILSVRC/blurred/train_blurred.tar.gz" _NOFACE_VAL_URL = "https://image-net.org/data/ILSVRC/blurred/val_blurred.tar.gz" _CLASS_INDEX_URL = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json" # this allows us to use the same class and cache for different "versions" of the dataset" BUILDER_CONFIGS = [ datasets.BuilderConfig( name="1k", version=VERSION, description=( "Standard ILSVRC2012 ImageNet dataset containing approximately 1.2 million " "training images across 1000 object categories. This widely used benchmark " "dataset is designed for image classification and object recognition tasks." ), ), datasets.BuilderConfig( name="100", version=VERSION, description=( "ImageNet-100 is a curated subset of the full ImageNet dataset consisting of 100 " "carefully selected classes. This smaller dataset enables faster experimentation " "while maintaining a diverse set of categories." ), ), datasets.BuilderConfig( name="10", version=VERSION, description=( "Imagenette is a subset of 10 easily classified ImageNet classes intended for " "quick prototyping and educational purposes. It provides a lightweight alternative " "for rapid development." ), ), datasets.BuilderConfig( name="mini", version=VERSION, description=( "ImageNet-Mini is a compact subset of ImageNet containing 100 classes with fewer " "images per class. It is intended for faster training and benchmarking while " "retaining representative class diversity." ), ), datasets.BuilderConfig( name="noface-1k", version=VERSION, description=( "Face-obfuscated variant of the standard ILSVRC2012 ImageNet dataset, where all " "human faces in images have been blurred or obscured to protect privacy, enabling " "privacy-preserving research without compromising classification utility." ), ), datasets.BuilderConfig( name="noface-100", version=VERSION, description=( "Face-obfuscated ImageNet-100 subset with the same 100 classes as the standard " "ImageNet-100, but with all human faces blurred or masked. Designed to support " "privacy-conscious applications and studies." ), ), datasets.BuilderConfig( name="noface-10", version=VERSION, description=( "Face-obfuscated Imagenette subset with 10 classes, where images have undergone " "face blurring to safeguard personal identities. Suitable for quick prototyping " "in privacy-sensitive scenarios." ), ), # datasets.BuilderConfig( # name="noface-mini", # version=VERSION, # description=( # "Face-obfuscated ImageNet-Mini subset with 100 classes, preserving the original " # "class diversity while anonymizing human faces for privacy preservation during " # "model training and evaluation." # ), # ), ] def _info(self): # depending on the config, we will set the class names with urllib.request.urlopen(self._CLASS_INDEX_URL) as response: full_mapping = json.load(response) # { "0": ["n01440764", "tench"], ... } # Build mapping from wnid to class name wnid_to_name = {value[0]: value[1] for key, value in full_mapping.items()} # Use the mapping to get formatted class names (e.g., "n01440764: tench") wnids = self._get_class_names() formatted_class_names = [ f"{wnid}: {wnid_to_name.get(wnid, wnid)}" for wnid in wnids ] # generic description for the dataset -- specific subsets are indicated by the config name return datasets.DatasetInfo( description=( "Comprehensive ImageNet dataset collection including multiple variants such as " "the full ILSVRC2012 ImageNet (1k), ImageNet-100, Imagenette (10), and ImageNet-Mini subsets, " "alongside their corresponding face-obfuscated (blurred) versions. " "This dataset supports privacy-preserving research by providing both original and " "face-obfuscated images for each subset." ), features=datasets.Features( { "image": datasets.Image(), "label": datasets.ClassLabel(names=formatted_class_names), "is_one": datasets.Value("bool"), "is_ten": datasets.Value("bool"), } ), supervised_keys=("image", "label"), # type: ignore homepage="https://image-net.org/face-obfuscation/", citation="""@inproceedings{yang2022study, title={A Study of Face Obfuscation in ImageNet}, author={Yang, Kevin and Yau, John and Fei-Fei, Li and Deng, Jia and Russakovsky, Olga}, booktitle={International Conference on Machine Learning (ICML)}, year={2022} }""", ) def _split_generators(self, dl_manager): if dl_manager.is_streaming: raise NotImplementedError( "Streaming is not yet supported for this dataset." ) # cache location for the dataset downloads inet_train_path = ( "/gpfs/data/shared/imagenet/ILSVRC2012/ILSVRC2012_img_train.tar" ) inet_val_path = "/gpfs/data/shared/imagenet/ILSVRC2012/ILSVRC2012_img_val.tar" inet_devkit_archive_path = ( "/gpfs/data/shared/imagenet/ILSVRC2012/ILSVRC2012_devkit_t12.tar.gz" ) noface_train_path = "/gpfs/data/shared/imagenet_blurred/train_blurred.tar.gz" noface_val_path = "/gpfs/data/shared/imagenet_blurred/val_blurred.tar.gz" # download if missing (this should only happen once) if not os.path.exists(inet_train_path): inet_train_path = dl_manager.download(self._INET_TRAIN_URL) if not os.path.exists(inet_val_path): inet_val_path = dl_manager.download(self._INET_VAL_URL) if not os.path.exists(inet_devkit_archive_path): inet_devkit_archive_path = dl_manager.download(self._INET_DEVKIT_URL) if not os.path.exists(noface_train_path): noface_train_path = dl_manager.download(self._NOFACE_TRAIN_URL) if not os.path.exists(noface_val_path): noface_val_path = dl_manager.download(self._NOFACE_VAL_URL) # Define extraction directories noface_train_extract_dir = noface_train_path + "_extracted" # type: ignore noface_val_extract_dir = noface_val_path + "_extracted" # type: ignore # Extract train if not already extracted if not os.path.exists(noface_train_extract_dir): print( "Extracting noface training archive... This will take a while, but will make the overall process much faster. " "Do not worry if this takes a long time. Be patient, this is a one-time operation. In the future, this will be cached." ) with tarfile.open(noface_train_path, "r:gz") as tar: # type: ignore tar.extractall(path=noface_train_extract_dir) print("Noface training extraction done.") # Extract val if not already extracted if not os.path.exists(noface_val_extract_dir): print( "Extracting noface validation archive... This may take a while, but will make the overall process much faster. " "Do not worry if this takes a long time. Be patient, this is a one-time operation. In the future, this will be cached." ) with tarfile.open(noface_val_path, "r:gz") as tar: # type: ignore tar.extractall(path=noface_val_extract_dir) print("Noface alidation extraction done.") return self._return_generated_splits( inet_train_path, inet_val_path, inet_devkit_archive_path, noface_train_extract_dir, noface_val_extract_dir, ) def _generate_examples(self, archive_path, split, devkit_archive=None): config_name = self.config.name # Depending on the config name, we will generate examples for the appropriate subset if config_name == "1k": return self._generate_full_imagenet( archive_path, split, devkit_archive=devkit_archive ) elif config_name == "100": return self._generate_nonblur_subset( archive_path, split, wnid_list=self._inet100_class_names(), devkit_archive=devkit_archive, ) elif config_name == "mini": return self._generate_nonblur_subset( archive_path, split, wnid_list=self._inet_mini_class_names(), devkit_archive=devkit_archive, ) elif config_name == "10": return self._generate_nonblur_subset( archive_path, split, wnid_list=self._imagenette_class_names(), devkit_archive=devkit_archive, ) elif config_name == "noface-1k": train_metadata = "https://huggingface.co/datasets/randall-lab/face-obfuscated-imagenet/resolve/main/metadata/noface_1k/train.txt" val_metadata = "https://huggingface.co/datasets/randall-lab/face-obfuscated-imagenet/resolve/main/metadata/noface_1k/val.txt" return self._generate_noface( archive_path, split, train_metadata, val_metadata ) elif config_name == "noface-100": train_metadata = "https://huggingface.co/datasets/randall-lab/face-obfuscated-imagenet/resolve/main/metadata/noface_100/train.txt" val_metadata = "https://huggingface.co/datasets/randall-lab/face-obfuscated-imagenet/resolve/main/metadata/noface_100/val.txt" return self._generate_noface( archive_path, split, train_metadata, val_metadata ) elif config_name == "noface-10": train_metadata = "https://huggingface.co/datasets/randall-lab/face-obfuscated-imagenet/resolve/main/metadata/noface_10/train.txt" val_metadata = "https://huggingface.co/datasets/randall-lab/face-obfuscated-imagenet/resolve/main/metadata/noface_10/val.txt" return self._generate_noface( archive_path, split, train_metadata, val_metadata ) # elif config_name == "noface-mini": # # TODO: Update the URLs to point to the correct metadata files # train_metadata = "/path/to/noface_mini/train.txt" # val_metadata = "/path/to/noface_mini/val.txt" # return self._generate_noface( # archive_path, split, train_metadata, val_metadata # ) else: raise ValueError( f"Unknown config name: {config_name}" ) # this should never happen def _return_generated_splits( self, inet_train_path, inet_val_path, inet_devkit_path, noface_train_extract_dir, noface_val_extract_dir, ): if self.config.name.startswith("noface"): # For all blurred (noface) configs return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # type: ignore gen_kwargs={ "archive_path": noface_train_extract_dir, "split": "train", }, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, # type: ignore gen_kwargs={ "archive_path": noface_val_extract_dir, "split": "validation", }, ), ] else: # For all non-blurred (normal) configs return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, # type: ignore gen_kwargs={"archive_path": inet_train_path, "split": "train"}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, # type: ignore gen_kwargs={ "archive_path": inet_val_path, "split": "validation", "devkit_archive": inet_devkit_path, }, ), ] def _generate_full_imagenet(self, archive_path, split, devkit_archive=None): if split == "train": # Open the training tar archive. with tarfile.open(archive_path, "r:*") as train_tar: # The training archive contains many tar files (one per class). sub_tar_members = [ m for m in train_tar.getmembers() if m.isfile() and m.name.endswith(".tar") ] # Download and cache the class mapping (if not already done). if not hasattr(self, "_mapping"): with urllib.request.urlopen(self._CLASS_INDEX_URL) as response: self._mapping = json.load(response) mapping = self._mapping # Build a mapping from WordNet ID (wnid) to integer label. wnid_to_label = {mapping[str(i)][0]: i for i in range(1000)} example_idx = 0 for m in sub_tar_members: # Each member’s name is e.g. "n01440764.tar"; extract the wnid. wnid = os.path.splitext(m.name)[0] if wnid not in wnid_to_label: # Skip any unexpected files. continue label = wnid_to_label[wnid] sub_tar_file = train_tar.extractfile(m) # Open the inner tar file containing images for this class. with tarfile.open(fileobj=sub_tar_file, mode="r:*") as sub_tar: for sub_m in sub_tar.getmembers(): if sub_m.isfile(): img_f = sub_tar.extractfile(sub_m) image_bytes = img_f.read() img = Image.open(io.BytesIO(image_bytes)) if img.mode != "RGB": img = img.convert("RGB") yield example_idx, { "image": img, "label": label, "is_one": False, "is_ten": False, } example_idx += 1 elif split == "validation": # For validation, open the validation tar archive. with tarfile.open(archive_path, "r:*") as val_tar: # Get all file members (each a validation image). members = [m for m in val_tar.getmembers() if m.isfile()] # Sort by filename to ensure the same order as in the ground truth file. members = sorted(members, key=lambda m: m.name) # Open the devkit archive to extract the ground truth file. with tarfile.open(devkit_archive, "r:*") as devkit_tar: gt_member = None for m in devkit_tar.getmembers(): if m.name.endswith("ILSVRC2012_validation_ground_truth.txt"): gt_member = m break if gt_member is None: raise ValueError( "Could not find the ground truth file in the devkit archive." ) gt_file = devkit_tar.extractfile(gt_member) gt_lines = gt_file.read().decode("utf-8").strip().splitlines() if len(gt_lines) != len(members): raise ValueError( "Mismatch between the number of validation images and ground truth labels." ) for example_idx, (m, gt) in enumerate(zip(members, gt_lines)): # Convert the ground truth label from 1-indexed to 0-indexed. label = int(gt) - 1 img_f = val_tar.extractfile(m) image_bytes = img_f.read() img = Image.open(io.BytesIO(image_bytes)) if img.mode != "RGB": img = img.convert("RGB") yield example_idx, { "image": img, "label": label, "is_one": False, "is_ten": False, } def _generate_nonblur_subset( self, archive_path, split, wnid_list, devkit_archive=None ): wnid_to_new_label = {wnid: idx for idx, wnid in enumerate(wnid_list)} if split == "train": with tarfile.open(archive_path, "r:*") as train_tar: # The training tar archive contains many tar files (one per class) sub_tar_members = [ m for m in train_tar.getmembers() if m.isfile() and m.name.endswith(".tar") ] example_idx = 0 for m in sub_tar_members: # Each member’s name is, e.g., "n01440764.tar"; extract the wnid. wnid = os.path.splitext(m.name)[0] # Only process if the wnid is in our selected subset. if wnid not in wnid_to_new_label: continue new_label = wnid_to_new_label[wnid] sub_tar_file = train_tar.extractfile(m) # Open the inner tar file containing images for this class. with tarfile.open(fileobj=sub_tar_file, mode="r:*") as sub_tar: for sub_m in sub_tar.getmembers(): if sub_m.isfile(): img_f = sub_tar.extractfile(sub_m) image_bytes = img_f.read() img = Image.open(io.BytesIO(image_bytes)) if img.mode != "RGB": img = img.convert("RGB") yield example_idx, { "image": img, "label": new_label, "is_one": False, "is_ten": False, } example_idx += 1 elif split == "validation": # For validation, filter examples based on their wnid. with tarfile.open(archive_path, "r:*") as val_tar: members = [m for m in val_tar.getmembers() if m.isfile()] # Sort by filename to match the ground truth order. members = sorted(members, key=lambda m: m.name) # Open the devkit archive to extract the ground truth file. with tarfile.open(devkit_archive, "r:*") as devkit_tar: gt_member = None for m in devkit_tar.getmembers(): if m.name.endswith("ILSVRC2012_validation_ground_truth.txt"): gt_member = m break if gt_member is None: raise ValueError( "Could not find the ground truth file in the devkit archive." ) gt_file = devkit_tar.extractfile(gt_member) gt_lines = gt_file.read().decode("utf-8").strip().splitlines() # Download the full class mapping to convert the original label to wnid. with urllib.request.urlopen(self._CLASS_INDEX_URL) as response: full_mapping = json.load(response) # Build a mapping from original index (0-indexed) to wnid. idx_to_wnid = { int(key): value[0] for key, value in full_mapping.items() } example_idx = 0 # Iterate over the validation tar members paired with the ground truth lines. for m, gt in zip(members, gt_lines): original_label = int(gt) - 1 # original labels are 1-indexed wnid = idx_to_wnid.get(original_label) # Only include the example if its wnid is in our selected subset. if wnid not in wnid_to_new_label: continue new_label = wnid_to_new_label[wnid] img_f = val_tar.extractfile(m) image_bytes = img_f.read() img = Image.open(io.BytesIO(image_bytes)) if img.mode != "RGB": img = img.convert("RGB") yield example_idx, { "image": img, "label": new_label, "is_one": False, "is_ten": False, } example_idx += 1 def _generate_noface(self, archive_path, split, train_txt_url, val_txt_url): def is_valid_file(path): return os.path.isfile(path) and os.path.getsize(path) > 0 num_failed = 0 if split == "train": with urllib.request.urlopen(train_txt_url) as f: train_ids = [line.decode("utf-8").strip() for line in f if line.strip()] if not hasattr(self, "_mapping"): with urllib.request.urlopen(self._CLASS_INDEX_URL) as response: self._mapping = json.load(response) wnid_to_label = { wnid: idx for idx, wnid in enumerate(self._get_class_names()) } valid_entries = [] for file_id in train_ids: # inputs are like n01440764_2708-0-0 # Split to get all parts: ["n01440764_2708", "0", "0"] parts = file_id.split("-") base_file_id = parts[0] # "n01440764_2708" is_one = parts[1] == "1" if len(parts) > 1 else False is_ten = parts[2] == "1" if len(parts) > 2 else False wnid = base_file_id.split("_")[0] img_name = base_file_id found = False for ext in [".JPEG", ".jpg"]: candidate_path = os.path.join( archive_path, "train_blurred", wnid, img_name + ext ) if is_valid_file(candidate_path): valid_entries.append((candidate_path, wnid, is_one, is_ten)) found = True break if not found: num_failed += 1 print(f"[train] Skipped {num_failed} invalid or zero-byte files.") example_idx = 0 for img_path, wnid, is_one, is_ten in valid_entries: label = wnid_to_label[wnid] img = Image.open(img_path) if img.mode != "RGB": img = img.convert("RGB") yield example_idx, { "image": img, "label": label, "is_one": is_one, "is_ten": is_ten, } example_idx += 1 if split == "validation": with urllib.request.urlopen(val_txt_url) as f: val_ids = [line.decode("utf-8").strip() for line in f if line.strip()] if not hasattr(self, "_mapping"): with urllib.request.urlopen(self._CLASS_INDEX_URL) as response: self._mapping = json.load(response) wnid_to_label = { wnid: idx for idx, wnid in enumerate(self._get_class_names()) } valid_entries = [] for file_id in val_ids: wnid = file_id.split("_")[0] img_name = file_id[len(wnid) + 1 :] found = False for ext in [".JPEG", ".jpg"]: candidate_path = os.path.join( archive_path, "val_blurred", wnid, img_name + ext ) if is_valid_file(candidate_path): valid_entries.append((candidate_path, wnid)) found = True break if not found: num_failed += 1 print(f"[validation] Skipped {num_failed} invalid or zero-byte files.") example_idx = 0 for img_path, wnid in valid_entries: label = wnid_to_label[wnid] img = Image.open(img_path) if img.mode != "RGB": img = img.convert("RGB") yield example_idx, { "image": img, "label": label, "is_one": False, "is_ten": False, } example_idx += 1 def _get_class_names(self): # Load the full ImageNet class index from the JSON with urllib.request.urlopen(self._CLASS_INDEX_URL) as response: full_mapping = json.load(response) # { "0": ["n01440764", "tench"], ... } if self.config.name in {"1k", "noface-1k"}: return [full_mapping[str(i)][0] for i in range(1000)] if self.config.name in {"100", "noface-100"}: return self._inet100_class_names() elif self.config.name in {"10", "noface-10"}: return self._imagenette_class_names() elif self.config.name in {"mini"}: # "noface-mini" return self._inet_mini_class_names() else: raise ValueError(f"Unknown config name: {self.config.name}") # Build list of formatted class names like "n01558993: robin" return [f"{wnid}: {wnid_to_name.get(wnid, wnid)}" for wnid in wnid_list] @staticmethod def _inet100_class_names(): return [ "n02869837", "n01749939", "n02488291", "n02107142", "n13037406", "n02091831", "n04517823", "n04589890", "n03062245", "n01773797", "n01735189", "n07831146", "n07753275", "n03085013", "n04485082", "n02105505", "n01983481", "n02788148", "n03530642", "n04435653", "n02086910", "n02859443", "n13040303", "n03594734", "n02085620", "n02099849", "n01558993", "n04493381", "n02109047", "n04111531", "n02877765", "n04429376", "n02009229", "n01978455", "n02106550", "n01820546", "n01692333", "n07714571", "n02974003", "n02114855", "n03785016", "n03764736", "n03775546", "n02087046", "n07836838", "n04099969", "n04592741", "n03891251", "n02701002", "n03379051", "n02259212", "n07715103", "n03947888", "n04026417", "n02326432", "n03637318", "n01980166", "n02113799", "n02086240", "n03903868", "n02483362", "n04127249", "n02089973", "n03017168", "n02093428", "n02804414", "n02396427", "n04418357", "n02172182", "n01729322", "n02113978", "n03787032", "n02089867", "n02119022", "n03777754", "n04238763", "n02231487", "n03032252", "n02138441", "n02104029", "n03837869", "n03494278", "n04136333", "n03794056", "n03492542", "n02018207", "n04067472", "n03930630", "n03584829", "n02123045", "n04229816", "n02100583", "n03642806", "n04336792", "n03259280", "n02116738", "n02108089", "n03424325", "n01855672", "n02090622", ] @staticmethod def _inet_mini_class_names(): return [ "n01532829", "n01558993", "n01704323", "n01749939", "n01770081", "n01843383", "n01855672", "n01910747", "n01930112", "n01981276", "n02074367", "n02089867", "n02091244", "n02091831", "n02099601", "n02101006", "n02105505", "n02108089", "n02108551", "n02108915", "n02110063", "n02110341", "n02111277", "n02113712", "n02114548", "n02116738", "n02120079", "n02129165", "n02138441", "n02165456", "n02174001", "n02219486", "n02443484", "n02457408", "n02606052", "n02687172", "n02747177", "n02795169", "n02823428", "n02871525", "n02950826", "n02966193", "n02971356", "n02981792", "n03017168", "n03047690", "n03062245", "n03075370", "n03127925", "n03146219", "n03207743", "n03220513", "n03272010", "n03337140", "n03347037", "n03400231", "n03417042", "n03476684", "n03527444", "n03535780", "n03544143", "n03584254", "n03676483", "n03770439", "n03773504", "n03775546", "n03838899", "n03854065", "n03888605", "n03908618", "n03924679", "n03980874", "n03998194", "n04067472", "n04146614", "n04149813", "n04243546", "n04251144", "n04258138", "n04275548", "n04296562", "n04389033", "n04418357", "n04435653", "n04443257", "n04509417", "n04515003", "n04522168", "n04596742", "n04604644", "n04612504", "n06794110", "n07584110", "n07613480", "n07697537", "n07747607", "n09246464", "n09256479", "n13054560", "n13133613", ] @staticmethod def _imagenette_class_names(): return [ "n01440764", "n02102040", "n02979186", "n03000684", "n03028079", "n03394916", "n03417042", "n03425413", "n03445777", "n03888257", ]