Commit 504ec61 · 1 Parent(s): 8900b6c · committed by aspartate

downloader script

Files changed (1):
  1. Patho-Bench.py +115 -0
Patho-Bench.py ADDED
@@ -0,0 +1,115 @@
+import os
+import datasets
+from datasets import Features, Value
+from huggingface_hub import snapshot_download
+import glob
+import yaml
+
+
+class PathoBenchConfig(datasets.BuilderConfig):
+    def __init__(self, **kwargs):
+
+        # Extract task_in_dataset and dataset_to_download from kwargs
+        self.task_in_dataset = kwargs.pop("task_in_dataset", None)
+        self.dataset_to_download = kwargs.pop("dataset_to_download", None)
+        self.force_download = kwargs.pop("force_download", True)
+
+        # Set default values for task_in_dataset and dataset_to_download
+        if self.dataset_to_download is None and self.task_in_dataset is None:
+            # If neither is provided, default both to '*'
+            self.dataset_to_download = '*'
+            self.task_in_dataset = '*'
+        elif self.dataset_to_download is None and self.task_in_dataset is not None:
+            # If task_in_dataset is provided but dataset_to_download is not, raise an error
+            raise AssertionError("Dataset needs to be defined for the task_in_dataset provided.")
+        elif self.dataset_to_download is not None and self.task_in_dataset is None:
+            # If dataset_to_download is provided but task_in_dataset is not, default task_in_dataset to '*'
+            self.task_in_dataset = '*'
+
+        super().__init__(**kwargs)
+
+
+class PathoBenchDataset(datasets.GeneratorBasedBuilder):
+    BUILDER_CONFIGS = [
+        PathoBenchConfig(name="custom_config", version="1.0.0", description="PathoBench config")
+    ]
+    BUILDER_CONFIG_CLASS = PathoBenchConfig
+
+    def _info(self):
+        return datasets.DatasetInfo(
+            description="PathoBench: collection of canonical computational pathology tasks",
+            homepage="https://github.com/mahmoodlab/patho-bench",
+            license="CC BY-NC-SA 4.0 Deed",
+            features=Features({
+                'path': Value('string')
+            })
+        )
+
+    def _split_generators(self, dl_manager):
+        # Builder attributes provided by the datasets library: Hub repo id and local cache root
+        repo_id = self.repo_id
+        dataset_to_download = self.config.dataset_to_download
+        local_dir = self._cache_dir_root
+        force_download = self.config.force_download
+        task_in_dataset = self.config.task_in_dataset
+
+        # Ensure the base local directory exists
+        os.makedirs(local_dir, exist_ok=True)
+
+        # Download available_splits.yaml if not yet downloaded
+        snapshot_download(
+            repo_id=repo_id,
+            allow_patterns=["available_splits.yaml"],
+            repo_type="dataset",
+            local_dir=local_dir,
+            force_download=force_download,
+        )
+
+        # Open the YAML file and get the list of implemented datasets and tasks
+        with open(os.path.join(local_dir, "available_splits.yaml"), 'r') as file:
+            available_splits = yaml.safe_load(file)
+
+        # Ensure the requested dataset (and task) are listed in available_splits
+        if dataset_to_download != "*":
+            assert dataset_to_download in available_splits, f"{dataset_to_download} was not found. Available splits: ({available_splits})"
+            if task_in_dataset != "*":
+                assert task_in_dataset in available_splits[dataset_to_download], f"{task_in_dataset} was not found in {dataset_to_download}. Available tasks: ({available_splits[dataset_to_download]})"
+
+        # Determine parent folder based on dataset naming
+        os.makedirs(local_dir, exist_ok=True)
+
+        # Determine the download pattern
+        if dataset_to_download == "*":
+            allow_patterns = ["*/*"]
+        else:
+            task_path = "*" if task_in_dataset == '*' else f"{task_in_dataset}/*"
+            allow_patterns = [f"{dataset_to_download}/{task_path}"]
+
+        # Download the required datasets
+        snapshot_download(
+            repo_id=repo_id,
+            allow_patterns=allow_patterns,
+            repo_type="dataset",
+            local_dir=local_dir,
+            force_download=force_download,
+        )
+
+        # Locate all downloaded .tsv split files
+        search_pattern = os.path.join(local_dir, '**', '*.tsv')
+        all_tsv_splits = glob.glob(search_pattern, recursive=True)
+
+        return [
+            datasets.SplitGenerator(
+                name="full",
+                gen_kwargs={"filepath": all_tsv_splits},
+            )
+        ]
+
+    def _generate_examples(self, filepath):
+        # Yield one example per .tsv split file, keyed by a running index
+        idx = 0
+        for file in filepath:
+            yield idx, {
+                'path': file
+            }
+            idx += 1