"""Universal Text Classification Dataset (UTCD)"""
import os
import json
from os.path import join as os_join
from typing import List
import datasets
from stefutil import *  # wildcard import; provides `StefConfig` and the `ca` argument checker used below


_DESCRIPTION = """
UTCD is a compilation of 18 classification datasets spanning 3 aspects: Sentiment,
Intent/Dialogue and Topic classification. UTCD focuses on the task of zero-shot text classification, where the
candidate labels are descriptive of the text being classified. UTCD consists of ~6M/800K train/test examples.
"""
# TODO: citation
_URL = "https://github.com/ChrisIsKing/zero-shot-text-classification/tree/master"
_URL_ZIP = "https://huggingface.co/datasets/claritylab/UTCD/raw/main/datasets.zip"  # currently unused; see `_split_generators`
_VERSION = datasets.Version('0.0.1')
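
# A minimal usage sketch (comments only, not executed on import); it assumes this script is
# hosted in the `claritylab/UTCD` dataset repo implied by `_URL_ZIP` above, and uses the
# config names defined in `Utcd.BUILDER_CONFIGS` below:
#   from datasets import load_dataset
#   dset = load_dataset('claritylab/UTCD', name='in-domain')
#   print(dset['train'][0])  # {'text': ..., 'labels': [...], 'dataset_name': ...}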


class UtcdConfig(datasets.BuilderConfig):
    """BuilderConfig for UTCD."""

    def __init__(self, domain: str, normalize_aspect: bool = False, **kwargs):
        """BuilderConfig for UTCD.

        Args:
            domain: `string`, dataset domain, one of [`in`, `out`].
            normalize_aspect: `bool`, if True, an aspect-normalized version of the dataset is returned.
            **kwargs: keyword arguments forwarded to super.
        """
        # Version history:
        #   0.0.1: Initial version.
        super(UtcdConfig, self).__init__(version=_VERSION, **kwargs)
        ca.check_mismatch('Dataset Domain', domain, ['in', 'out'])  # validate `domain` early
        self.domain = domain
        self.normalize_aspect = normalize_aspect

    def to_dir_name(self) -> str:
        """
        :return: directory name for this config's dataset files as stored on the hub
        """
        domain_str = 'in-domain' if self.domain == 'in' else 'out-of-domain'
        prefix = 'aspect-normalized-' if self.normalize_aspect else ''
        return f'{prefix}{domain_str}'
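    # The 4 possible outputs, matching the config names in `Utcd.BUILDER_CONFIGS` below:
    #   ('in', False)  -> 'in-domain'            ('in', True)  -> 'aspect-normalized-in-domain'
    #   ('out', False) -> 'out-of-domain'        ('out', True) -> 'aspect-normalized-out-of-domain'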


config = StefConfig('config.json')  # per-dataset metadata: aspect, domain & per-split labels

_split2hf_split = dict(train=datasets.Split.TRAIN, eval=datasets.Split.VALIDATION, test=datasets.Split.TEST)


class Utcd(datasets.GeneratorBasedBuilder):
    """UTCD: Universal Text Classification Dataset. Version 0.0.1."""
    # The 18 datasets by aspect & domain (kept for reference; the live metadata is read from `config.json`):
    # _config = dict(
    #     go_emotion=dict(aspect='sentiment', domain='in', name='GoEmotions'),
    #     sentiment_tweets_2020=dict(aspect='sentiment', domain='in', name='TweetEval'),
    #     emotion=dict(aspect='sentiment', domain='in', name='Emotion'),
    #     sgd=dict(aspect='intent', domain='in', name='Schema-Guided Dialogue'),
    #     clinc_150=dict(aspect='intent', domain='in', name='Clinc-150'),
    #     slurp=dict(aspect='intent', domain='in', name='SLURP'),
    #     ag_news=dict(aspect='topic', domain='in', name='AG News'),
    #     dbpedia=dict(aspect='topic', domain='in', name='DBpedia'),
    #     yahoo=dict(aspect='topic', domain='in', name='Yahoo Answer Topics'),
    #
    #     amazon_polarity=dict(aspect='sentiment', domain='out', name='Amazon Review Polarity'),
    #     finance_sentiment=dict(aspect='sentiment', domain='out', name='Financial Phrase Bank'),
    #     yelp=dict(aspect='sentiment', domain='out', name='Yelp Review'),
    #     banking77=dict(aspect='intent', domain='out', name='Banking77'),
    #     snips=dict(aspect='intent', domain='out', name='SNIPS'),
    #     nlu_evaluation=dict(aspect='intent', domain='out', name='NLU Evaluation'),
    #     multi_eurlex=dict(aspect='topic', domain='out', name='MultiEURLEX'),
    #     patent=dict(aspect='topic', domain='out', name='Big Patent'),
    #     consumer_finance=dict(aspect='topic', domain='out', name='Consumer Finance Complaints')
    # )
    VERSION = _VERSION
    BUILDER_CONFIGS = [
        UtcdConfig(
            name='in-domain',
            description='All in-domain datasets.',
            domain='in',
            normalize_aspect=False
        ),
        UtcdConfig(
            name='aspect-normalized-in-domain',
            description='Aspect-normalized version of all in-domain datasets.',
            domain='in',
            normalize_aspect=True
        ),
        UtcdConfig(
            name='out-of-domain',
            description='All out-of-domain datasets.',
            domain='out',
            normalize_aspect=False
        ),
        UtcdConfig(
            name='aspect-normalized-out-of-domain',
            description='Aspect-normalized version of all out-of-domain datasets.',
            domain='out',
            normalize_aspect=True
        )
    ]

    def _get_dataset_names(self):
        return [dnm for dnm, d_dset in config().items() if d_dset['domain'] == self.config.domain]

    def _info(self):
        dnms = self._get_dataset_names()
        labels = [config(f'{dnm}.splits.{split}.labels') for dnm in dnms for split in ['train', 'test']]
        labels = sorted(set().union(*labels))  # drop duplicate labels across datasets
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                text=datasets.Value(dtype='string'),
                labels=datasets.Sequence(feature=datasets.ClassLabel(names=labels), length=-1),  # for multi-label
                dataset_name=datasets.ClassLabel(names=dnms)
            ),
            homepage=_URL
            # TODO: citation
        )
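    # Illustrative only (hypothetical values): since `labels` and `dataset_name` are
    # `ClassLabel` features, a loaded row holds integer class indices, e.g.
    #   {'text': 'book me a flight', 'labels': [42], 'dataset_name': 10}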

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # only the aspect-normalized versions of the dataset include a validation (`eval`) split
        splits = ['train', 'eval', 'test'] if self.config.normalize_aspect else ['train', 'test']
        dnms = self._get_dataset_names()
        dir_nm = self.config.to_dir_name()
        # TODO: update root dataset naming version & dataset split naming
        base_path = dl_manager.download_and_extract('datasets.zip')  # relative path, resolved against the dataset repo
        split2paths = {s: [os_join(base_path, f'{dir_nm}_split', dnm, f'{s}.json') for dnm in dnms] for s in splits}
        # the order of dataset file paths is deterministic since the dataset-name ordering is
        return [
            datasets.SplitGenerator(name=_split2hf_split[s], gen_kwargs=dict(filepath=split2paths[s])) for s in splits
        ]
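    # Each per-split file is expected to be a JSON object mapping each text to its gold
    # label(s), per the `dset.items()` loop below; an illustrative (hypothetical) file:
    #   {"play some jazz": ["play_music"], "wake me at 7": ["alarm_set"]}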

    def _generate_examples(self, filepath: List[str]):
        id_ = 0
        for path in filepath:  # each file holds one split of one dataset
            dnm = path.split(os.sep)[-2]  # the dataset name is the parent directory name
            with open(path, encoding='utf-8') as fl:
                dset = json.load(fl)
            for txt, labels in dset.items():
                yield id_, dict(text=txt, labels=labels, dataset_name=dnm)
                id_ += 1