Datasets:

Modalities:
Text
Languages:
English
ArXiv:
Libraries:
Datasets
License:
StefanH committed on
Commit
a32ce65
·
1 Parent(s): 9f52e0a

Update: loading script TODO test

Browse files
Files changed (2) hide show
  1. dev.py +0 -8
  2. utcd.py +2 -10
dev.py DELETED
@@ -1,8 +0,0 @@
1
- from stefutil import *
2
-
3
-
4
- if __name__ == '__main__':
5
- from datasets import load_dataset
6
-
7
- dnm = 'claritylab/utcd'
8
- mic(load_dataset(dnm))
 
 
 
 
 
 
 
 
 
utcd.py CHANGED
@@ -116,7 +116,6 @@ class Utcd(datasets.GeneratorBasedBuilder):
116
  def _info(self):
117
  dnms = self._get_dataset_names()
118
  labels = [config(f'{dnm}.splits.{split}.labels') for dnm in dnms for split in ['train', 'test']]
119
- mic(dnms, labels)
120
 
121
  labels = sorted(set().union(*labels)) # drop duplicate labels across datasets
122
  return datasets.DatasetInfo(
@@ -136,22 +135,15 @@ class Utcd(datasets.GeneratorBasedBuilder):
136
  dnms = self._get_dataset_names()
137
  dir_nm = self.config.to_dir_name()
138
  split2paths = {s: [os_join(dir_nm, dnm, s) for dnm in dnms] for s in splits}
139
- mic(split2paths)
140
 
141
- downloaded_files = dl_manager.download_and_extract('datasets.zip')
142
- mic(downloaded_files)
143
- # raise NotImplementedError
144
-
145
- # return [
146
- # datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
147
- # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
148
- # ]
149
  return [
150
  datasets.SplitGenerator(name=_split2hf_split[s], gen_kwargs={"filepath": split2paths[s]}) for s in splits
151
  ]
152
 
153
  def _generate_examples(self, filepath: str):
154
  # each call is for one split of one dataset
 
155
  dnm = filepath.split(os.sep)[-2]
156
  id_ = 0
157
  with open(filepath, encoding='utf-8') as f:
 
116
  def _info(self):
117
  dnms = self._get_dataset_names()
118
  labels = [config(f'{dnm}.splits.{split}.labels') for dnm in dnms for split in ['train', 'test']]
 
119
 
120
  labels = sorted(set().union(*labels)) # drop duplicate labels across datasets
121
  return datasets.DatasetInfo(
 
135
  dnms = self._get_dataset_names()
136
  dir_nm = self.config.to_dir_name()
137
  split2paths = {s: [os_join(dir_nm, dnm, s) for dnm in dnms] for s in splits}
 
138
 
139
+ dl_manager.download_and_extract('datasets.zip')
 
 
 
 
 
 
 
140
  return [
141
  datasets.SplitGenerator(name=_split2hf_split[s], gen_kwargs={"filepath": split2paths[s]}) for s in splits
142
  ]
143
 
144
  def _generate_examples(self, filepath: str):
145
  # each call is for one split of one dataset
146
+ mic(filepath, type(filepath))
147
  dnm = filepath.split(os.sep)[-2]
148
  id_ = 0
149
  with open(filepath, encoding='utf-8') as f: