parquet-converter committed on
Commit
5ccba02
·
1 Parent(s): ed6ef27

Update parquet files

.gitattributes DELETED
@@ -1,37 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
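
Each deleted line above follows the Git LFS attribute syntax `<pattern> filter=lfs diff=lfs merge=lfs -text`, which routes matching files through the LFS filter and marks them as binary. A minimal Python sketch that regenerates entries of this form (the pattern list here is an illustrative subset, not the full set above):

```python
# Sketch: regenerate Git LFS attribute lines like the ones deleted above.
# The patterns here are a small illustrative subset, not the full list.
lfs_patterns = ["*.parquet", "*.arrow", "*.bin"]

with open(".gitattributes", "w") as f:
    for pattern in lfs_patterns:
        # filter/diff/merge route the file through LFS; -text marks it binary.
        f.write(f"{pattern} filter=lfs diff=lfs merge=lfs -text\n")
```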
README.md DELETED
@@ -1,331 +0,0 @@
- ---
- annotations_creators:
- - other
- language_creators:
- - machine-generated
- language:
- - en
- license:
- - cc-by-sa-4.0
- multilinguality:
- - monolingual
- pretty_name: Adversarial GLUE
- size_categories:
- - n<1K
- source_datasets:
- - extended|glue
- task_categories:
- - text-classification
- task_ids:
- - natural-language-inference
- - sentiment-classification
- configs:
- - adv_mnli
- - adv_mnli_mismatched
- - adv_qnli
- - adv_qqp
- - adv_rte
- - adv_sst2
- tags:
- - paraphrase-identification
- - qa-nli
- dataset_info:
- - config_name: adv_sst2
-   features:
-   - name: sentence
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: negative
-           1: positive
-   - name: idx
-     dtype: int32
-   splits:
-   - name: validation
-     num_bytes: 16595
-     num_examples: 148
-   download_size: 40662
-   dataset_size: 16595
- - config_name: adv_qqp
-   features:
-   - name: question1
-     dtype: string
-   - name: question2
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: not_duplicate
-           1: duplicate
-   - name: idx
-     dtype: int32
-   splits:
-   - name: validation
-     num_bytes: 9926
-     num_examples: 78
-   download_size: 40662
-   dataset_size: 9926
- - config_name: adv_mnli
-   features:
-   - name: premise
-     dtype: string
-   - name: hypothesis
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: entailment
-           1: neutral
-           2: contradiction
-   - name: idx
-     dtype: int32
-   splits:
-   - name: validation
-     num_bytes: 23736
-     num_examples: 121
-   download_size: 40662
-   dataset_size: 23736
- - config_name: adv_mnli_mismatched
-   features:
-   - name: premise
-     dtype: string
-   - name: hypothesis
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: entailment
-           1: neutral
-           2: contradiction
-   - name: idx
-     dtype: int32
-   splits:
-   - name: validation
-     num_bytes: 40982
-     num_examples: 162
-   download_size: 40662
-   dataset_size: 40982
- - config_name: adv_qnli
-   features:
-   - name: question
-     dtype: string
-   - name: sentence
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: entailment
-           1: not_entailment
-   - name: idx
-     dtype: int32
-   splits:
-   - name: validation
-     num_bytes: 34877
-     num_examples: 148
-   download_size: 40662
-   dataset_size: 34877
- - config_name: adv_rte
-   features:
-   - name: sentence1
-     dtype: string
-   - name: sentence2
-     dtype: string
-   - name: label
-     dtype:
-       class_label:
-         names:
-           0: entailment
-           1: not_entailment
-   - name: idx
-     dtype: int32
-   splits:
-   - name: validation
-     num_bytes: 25998
-     num_examples: 81
-   download_size: 40662
-   dataset_size: 25998
- ---
-
- # Dataset Card for Adversarial GLUE
-
- ## Table of Contents
- - [Table of Contents](#table-of-contents)
- - [Dataset Description](#dataset-description)
-   - [Dataset Summary](#dataset-summary)
-   - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
-   - [Languages](#languages)
- - [Dataset Structure](#dataset-structure)
-   - [Data Instances](#data-instances)
-   - [Data Fields](#data-fields)
-   - [Data Splits](#data-splits)
- - [Dataset Creation](#dataset-creation)
-   - [Curation Rationale](#curation-rationale)
-   - [Source Data](#source-data)
-   - [Annotations](#annotations)
-   - [Personal and Sensitive Information](#personal-and-sensitive-information)
- - [Considerations for Using the Data](#considerations-for-using-the-data)
-   - [Social Impact of Dataset](#social-impact-of-dataset)
-   - [Discussion of Biases](#discussion-of-biases)
-   - [Other Known Limitations](#other-known-limitations)
- - [Additional Information](#additional-information)
-   - [Dataset Curators](#dataset-curators)
-   - [Licensing Information](#licensing-information)
-   - [Citation Information](#citation-information)
-   - [Contributions](#contributions)
-
-
- ## Dataset Description
-
- - **Homepage:** https://adversarialglue.github.io/
- - **Repository:**
- - **Paper:** [arXiv](https://arxiv.org/pdf/2111.02840.pdf)
- - **Leaderboard:**
- - **Point of Contact:**
-
- ### Dataset Summary
-
- The Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark that focuses on the adversarial robustness of language models. It covers five natural language understanding tasks from the well-known GLUE benchmark and is an adversarial version of that benchmark.
-
- AdvGLUE considers textual adversarial attacks from different perspectives and hierarchies, including word-level transformations, sentence-level manipulations, and human-written adversarial examples, which together provide comprehensive coverage of adversarial linguistic phenomena.
-
- ### Supported Tasks and Leaderboards
-
- A leaderboard is available on the homepage: [https://adversarialglue.github.io/](https://adversarialglue.github.io/).
-
- ### Languages
-
- AdvGLUE is derived from the GLUE benchmark, whose base language is English.
-
- ## Dataset Structure
-
- ### Data Instances
-
- #### default
-
- - **Size of downloaded dataset files:** 198 KB
- - **Example**:
- ```python
- >>> datasets.load_dataset('adv_glue', 'adv_sst2')['validation'][0]
- {'sentence': "it 's an uneven treat that bores fun at the democratic exercise while also examining its significance for those who take part .", 'label': 1, 'idx': 0}
- ```
-
- ### Data Fields
-
- The data fields are the same as in the GLUE dataset and differ by task.
-
- The data fields are the same among all splits.
-
- #### adv_mnli
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
-
- #### adv_mnli_matched
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
-
- #### adv_mnli_mismatched
- - `premise`: a `string` feature.
- - `hypothesis`: a `string` feature.
- - `label`: a classification label, with possible values including `entailment` (0), `neutral` (1), `contradiction` (2).
- - `idx`: an `int32` feature.
-
- #### adv_qnli
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- #### adv_qqp
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- #### adv_rte
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- #### adv_sst2
-
- [More Information Needed](https://github.com/huggingface/datasets/blob/master/CONTRIBUTING.md#how-to-contribute-to-the-dataset-cards)
-
- ### Data Splits
-
- Adversarial GLUE provides only a 'dev' (validation) split.
-
- ## Dataset Creation
-
- ### Curation Rationale
-
- [More Information Needed]
-
- ### Source Data
-
- #### Initial Data Collection and Normalization
-
- [More Information Needed]
-
- #### Who are the source language producers?
-
- [More Information Needed]
-
- ### Annotations
-
- #### Annotation process
-
- [More Information Needed]
-
- #### Who are the annotators?
-
- [More Information Needed]
-
- ### Personal and Sensitive Information
-
- [More Information Needed]
-
- ## Considerations for Using the Data
-
- ### Social Impact of Dataset
-
- [More Information Needed]
-
- ### Discussion of Biases
-
- [More Information Needed]
-
- ### Other Known Limitations
-
- [More Information Needed]
-
- ## Additional Information
-
- ### Dataset Curators
-
- [More Information Needed]
-
- ### Licensing Information
-
- The dataset is distributed under the [CC BY-SA 4.0](http://creativecommons.org/licenses/by-sa/4.0/legalcode) license.
-
- ### Citation Information
-
- ```bibtex
- @article{Wang2021AdversarialGA,
-   title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},
-   author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},
-   journal={ArXiv},
-   year={2021},
-   volume={abs/2111.02840}
- }
- ```
-
- ### Contributions
-
- Thanks to [@jxmorris12](https://github.com/jxmorris12) for adding this dataset.
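
As a brief aside, the Data Fields section of the deleted card maps directly onto what `datasets` returns at load time. A minimal sketch, assuming the hub dataset ID `adv_glue` and a recent `datasets` release:

```python
from datasets import load_dataset

# Load one AdvGLUE config; only a validation split is provided.
ds = load_dataset("adv_glue", "adv_mnli")["validation"]

# The ClassLabel feature carries the names listed in the card.
print(ds.features["label"].names)  # ['entailment', 'neutral', 'contradiction']
print(ds[0])  # {'premise': ..., 'hypothesis': ..., 'label': ..., 'idx': ...}
```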
adv_glue.py DELETED
@@ -1,330 +0,0 @@
- """The Adversarial GLUE (AdvGLUE) benchmark.
- Homepage: https://adversarialglue.github.io/
- """
- import json
- import os
- import textwrap
-
- import datasets
-
-
- _ADV_GLUE_CITATION = """\
- @article{Wang2021AdversarialGA,
-   title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},
-   author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},
-   journal={ArXiv},
-   year={2021},
-   volume={abs/2111.02840}
- }
- """
-
- _ADV_GLUE_DESCRIPTION = """\
- Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark
- that focuses on the adversarial robustness evaluation of language models. It covers five
- natural language understanding tasks from the famous GLUE tasks and is an adversarial
- version of GLUE benchmark.
- """
-
- _MNLI_BASE_KWARGS = dict(
-     text_features={
-         "premise": "premise",
-         "hypothesis": "hypothesis",
-     },
-     label_classes=["entailment", "neutral", "contradiction"],
-     label_column="label",
-     data_url="https://dl.fbaipublicfiles.com/glue/data/MNLI.zip",
-     data_dir="MNLI",
-     citation=textwrap.dedent(
-         """\
-         @InProceedings{N18-1101,
-           author = "Williams, Adina
-                     and Nangia, Nikita
-                     and Bowman, Samuel",
-           title = "A Broad-Coverage Challenge Corpus for
-                    Sentence Understanding through Inference",
-           booktitle = "Proceedings of the 2018 Conference of
-                        the North American Chapter of the
-                        Association for Computational Linguistics:
-                        Human Language Technologies, Volume 1 (Long
-                        Papers)",
-           year = "2018",
-           publisher = "Association for Computational Linguistics",
-           pages = "1112--1122",
-           location = "New Orleans, Louisiana",
-           url = "http://aclweb.org/anthology/N18-1101"
-         }
-         @article{bowman2015large,
-           title={A large annotated corpus for learning natural language inference},
-           author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
-           journal={arXiv preprint arXiv:1508.05326},
-           year={2015}
-         }"""
-     ),
-     url="http://www.nyu.edu/projects/bowman/multinli/",
- )
-
- ADVGLUE_DEV_URL = "https://adversarialglue.github.io/dataset/dev.zip"
-
-
- class AdvGlueConfig(datasets.BuilderConfig):
-     """BuilderConfig for Adversarial GLUE."""
-
-     def __init__(
-         self,
-         text_features,
-         label_column,
-         data_url,
-         data_dir,
-         citation,
-         url,
-         label_classes=None,
-         process_label=lambda x: x,
-         **kwargs,
-     ):
-         """BuilderConfig for Adversarial GLUE.
-
-         Args:
-           text_features: `dict[string, string]`, map from the name of the feature
-             dict for each text field to the name of the column in the tsv file
-           label_column: `string`, name of the column in the tsv file corresponding
-             to the label
-           data_url: `string`, url to download the zip file from
-           data_dir: `string`, the path to the folder containing the tsv files in the
-             downloaded zip
-           citation: `string`, citation for the data set
-           url: `string`, url for information about the data set
-           label_classes: `list[string]`, the list of classes if the label is
-             categorical. If not provided, then the label will be of type
-             `datasets.Value('float32')`.
-           process_label: `Function[string, any]`, function taking in the raw value
-             of the label and processing it to the form required by the label feature
-           **kwargs: keyword arguments forwarded to super.
-         """
-         super(AdvGlueConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
-         self.text_features = text_features
-         self.label_column = label_column
-         self.label_classes = label_classes
-         self.data_url = data_url
-         self.data_dir = data_dir
-         self.citation = citation
-         self.url = url
-         self.process_label = process_label
-
-
- ADVGLUE_BUILDER_CONFIGS = [
-     AdvGlueConfig(
-         name="adv_sst2",
-         description=textwrap.dedent(
-             """Adversarial version of SST-2.
-             The Stanford Sentiment Treebank consists of sentences from movie reviews and
-             human annotations of their sentiment. The task is to predict the sentiment of a
-             given sentence. We use the two-way (positive/negative) class split, and use only
-             sentence-level labels."""
-         ),
-         text_features={"sentence": "sentence"},
-         label_classes=["negative", "positive"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/SST-2.zip",
-         data_dir="SST-2",
-         citation=textwrap.dedent(
-             """\
-             @inproceedings{socher2013recursive,
-               title={Recursive deep models for semantic compositionality over a sentiment treebank},
-               author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
-               booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
-               pages={1631--1642},
-               year={2013}
-             }"""
-         ),
-         url="https://datasets.stanford.edu/sentiment/index.html",
-     ),
-     AdvGlueConfig(
-         name="adv_qqp",
-         description=textwrap.dedent(
-             """Adversarial version of QQP.
-             The Quora Question Pairs2 dataset is a collection of question pairs from the
-             community question-answering website Quora. The task is to determine whether a
-             pair of questions are semantically equivalent."""
-         ),
-         text_features={
-             "question1": "question1",
-             "question2": "question2",
-         },
-         label_classes=["not_duplicate", "duplicate"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/QQP-clean.zip",
-         data_dir="QQP",
-         citation=textwrap.dedent(
-             """\
-             @online{WinNT,
-               author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
-               title = {First Quora Dataset Release: Question Pairs},
-               year = {2017},
-               url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
-               urldate = {2019-04-03}
-             }"""
-         ),
-         url="https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs",
-     ),
-     AdvGlueConfig(
-         name="adv_mnli",
-         description=textwrap.dedent(
-             """Adversarial version of MNLI.
-             The Multi-Genre Natural Language Inference Corpus is a crowdsourced
-             collection of sentence pairs with textual entailment annotations. Given a premise sentence
-             and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
-             (entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
-             gathered from ten different sources, including transcribed speech, fiction, and government reports.
-             We use the standard test set, for which we obtained private labels from the authors, and evaluate
-             on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
-             the SNLI corpus as 550k examples of auxiliary training data."""
-         ),
-         **_MNLI_BASE_KWARGS,
-     ),
-     AdvGlueConfig(
-         name="adv_mnli_mismatched",
-         description=textwrap.dedent(
-             """Adversarial version of MNLI-mismatched.
-             The mismatched validation and test splits from MNLI.
-             See the "mnli" BuilderConfig for additional information."""
-         ),
-         **_MNLI_BASE_KWARGS,
-     ),
-     AdvGlueConfig(
-         name="adv_qnli",
-         description=textwrap.dedent(
-             """Adversarial version of QNLI.
-             The Stanford Question Answering Dataset is a question-answering
-             dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
-             from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
-             convert the task into sentence pair classification by forming a pair between each question and each
-             sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
-             question and the context sentence. The task is to determine whether the context sentence contains
-             the answer to the question. This modified version of the original task removes the requirement that
-             the model select the exact answer, but also removes the simplifying assumptions that the answer
-             is always present in the input and that lexical overlap is a reliable cue."""
-         ),  # pylint: disable=line-too-long
-         text_features={
-             "question": "question",
-             "sentence": "sentence",
-         },
-         label_classes=["entailment", "not_entailment"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/QNLIv2.zip",
-         data_dir="QNLI",
-         citation=textwrap.dedent(
-             """\
-             @article{rajpurkar2016squad,
-               title={Squad: 100,000+ questions for machine comprehension of text},
-               author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
-               journal={arXiv preprint arXiv:1606.05250},
-               year={2016}
-             }"""
-         ),
-         url="https://rajpurkar.github.io/SQuAD-explorer/",
-     ),
-     AdvGlueConfig(
-         name="adv_rte",
-         description=textwrap.dedent(
-             """Adversarial version of RTE.
-             The Recognizing Textual Entailment (RTE) datasets come from a series of annual textual
-             entailment challenges. We combine the data from RTE1 (Dagan et al., 2006), RTE2 (Bar Haim
-             et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli et al., 2009).4 Examples are
-             constructed based on news and Wikipedia text. We convert all datasets to a two-class split, where
-             for three-class datasets we collapse neutral and contradiction into not entailment, for consistency."""
-         ),  # pylint: disable=line-too-long
-         text_features={
-             "sentence1": "sentence1",
-             "sentence2": "sentence2",
-         },
-         label_classes=["entailment", "not_entailment"],
-         label_column="label",
-         data_url="https://dl.fbaipublicfiles.com/glue/data/RTE.zip",
-         data_dir="RTE",
-         citation=textwrap.dedent(
-             """\
-             @inproceedings{dagan2005pascal,
-               title={The PASCAL recognising textual entailment challenge},
-               author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
-               booktitle={Machine Learning Challenges Workshop},
-               pages={177--190},
-               year={2005},
-               organization={Springer}
-             }
-             @inproceedings{bar2006second,
-               title={The second pascal recognising textual entailment challenge},
-               author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
-               booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
-               volume={6},
-               number={1},
-               pages={6--4},
-               year={2006},
-               organization={Venice}
-             }
-             @inproceedings{giampiccolo2007third,
-               title={The third pascal recognizing textual entailment challenge},
-               author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
-               booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
-               pages={1--9},
-               year={2007},
-               organization={Association for Computational Linguistics}
-             }
-             @inproceedings{bentivogli2009fifth,
-               title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
-               author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
-               booktitle={TAC},
-               year={2009}
-             }"""
-         ),
-         url="https://aclweb.org/aclwiki/Recognizing_Textual_Entailment",
-     ),
- ]
-
-
- class AdvGlue(datasets.GeneratorBasedBuilder):
-     """The Adversarial GLUE (AdvGLUE) benchmark."""
-
-     DATASETS = ["adv_sst2", "adv_qqp", "adv_mnli", "adv_mnli_mismatched", "adv_qnli", "adv_rte"]
-     BUILDER_CONFIGS = ADVGLUE_BUILDER_CONFIGS
-
-     def _info(self):
-         features = {text_feature: datasets.Value("string") for text_feature in self.config.text_features.keys()}
-         if self.config.label_classes:
-             features["label"] = datasets.features.ClassLabel(names=self.config.label_classes)
-         else:
-             features["label"] = datasets.Value("float32")
-         features["idx"] = datasets.Value("int32")
-         return datasets.DatasetInfo(
-             description=_ADV_GLUE_DESCRIPTION,
-             features=datasets.Features(features),
-             homepage="https://adversarialglue.github.io/",
-             citation=_ADV_GLUE_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         assert self.config.name in AdvGlue.DATASETS
-         data_dir = dl_manager.download_and_extract(ADVGLUE_DEV_URL)
-         data_file = os.path.join(data_dir, "dev", "dev.json")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={
-                     "data_file": data_file,
-                 },
-             )
-         ]
-
-     def _generate_examples(self, data_file):
-         # Configs are named 'adv_sst2' instead of 'sst2' so as not to be confused
-         # with the original SST-2. Inside dev.json they are keyed like 'sst2',
-         # so the 'adv_' prefix has to be removed.
-         config_key = self.config.name.replace("adv_", "")
-         if config_key == "mnli_mismatched":
-             # ... and this config is keyed differently there.
-             config_key = "mnli-mm"
-         with open(data_file) as f:
-             data = json.load(f)
-         for row in data[config_key]:
-             example = {feat: row[col] for feat, col in self.config.text_features.items()}
-             example["label"] = self.config.process_label(row[self.config.label_column])
-             example["idx"] = row["idx"]
-             yield example["idx"], example
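
The config-name mapping buried in `_generate_examples` is the subtle part of this script, so here is a self-contained restatement of just that logic; `config_key_for` is a helper name invented for illustration:

```python
def config_key_for(config_name: str) -> str:
    # Mirrors _generate_examples above: configs are exposed as 'adv_*'
    # but keyed without the prefix inside dev.json, and the mismatched
    # MNLI section is stored under 'mnli-mm'.
    key = config_name.replace("adv_", "")
    return "mnli-mm" if key == "mnli_mismatched" else key

assert config_key_for("adv_sst2") == "sst2"
assert config_key_for("adv_mnli") == "mnli"
assert config_key_for("adv_mnli_mismatched") == "mnli-mm"
```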
adv_mnli/adv_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:54d2d5c6ec417d8c9a7cd948dd1387ee689c8927e7ab6cce5814dab47eb17f5a
+ size 13484
adv_mnli_mismatched/adv_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:94c4dafe6b82b3ae18a9ff4ceb4080cf155223df086f13082e3bcca90cbf4690
+ size 25165
adv_qnli/adv_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebc55f2e14cf3b5a770059e1a7c146f893116de2c8f7de8d355394a69823bda2
+ size 19110
adv_qqp/adv_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9be41c092140a69324a84a6bcff86fc454532a978e42b735af0ad5a738e8d0e2
+ size 7704
adv_rte/adv_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5805a23ad530697696e1dcf66a0cf9f06e987704011dfead45e5377fa1077f5
+ size 15871
adv_sst2/adv_glue-validation.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6fc83000736b294028b987f9c5e724f8bac1920a45e5592882e73bc4820ac6b3
+ size 10832
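
Each file added above is stored as a Git LFS pointer rather than the parquet bytes themselves. A small sketch that parses such a pointer, using the `adv_sst2` values from this commit:

```python
def parse_lfs_pointer(text: str) -> dict:
    # An LFS pointer is a short set of 'key value' lines: version, oid, size.
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = """\
version https://git-lfs.github.com/spec/v1
oid sha256:6fc83000736b294028b987f9c5e724f8bac1920a45e5592882e73bc4820ac6b3
size 10832"""

info = parse_lfs_pointer(pointer)
assert info["oid"].startswith("sha256:")
assert info["size"] == "10832"
```

Once the actual parquet files are fetched, they can also be read without the deleted loader script, e.g. `load_dataset("parquet", data_files="adv_sst2/adv_glue-validation.parquet")` (path taken from the layout above).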
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"adv_sst2": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["negative", "positive"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_sst2", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 16595, "num_examples": 148, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 16595, "size_in_bytes": 57257}, "adv_qqp": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"question1": {"dtype": "string", "id": null, "_type": "Value"}, "question2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["not_duplicate", "duplicate"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_qqp", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 9926, "num_examples": 78, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 9926, "size_in_bytes": 50588}, "adv_mnli": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. 
It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_mnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 23736, "num_examples": 121, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 23736, "size_in_bytes": 64398}, "adv_mnli_mismatched": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"premise": {"dtype": "string", "id": null, "_type": "Value"}, "hypothesis": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 3, "names": ["entailment", "neutral", "contradiction"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_mnli_mismatched", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 40982, "num_examples": 162, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 40982, "size_in_bytes": 81644}, "adv_qnli": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. 
It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_qnli", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 34877, "num_examples": 148, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 34877, "size_in_bytes": 75539}, "adv_rte": {"description": "Adversarial GLUE Benchmark (AdvGLUE) is a comprehensive robustness evaluation benchmark\nthat focuses on the adversarial robustness evaluation of language models. It covers five\nnatural language understanding tasks from the famous GLUE tasks and is an adversarial\nversion of GLUE benchmark.\n", "citation": "@article{Wang2021AdversarialGA,\n title={Adversarial GLUE: A Multi-Task Benchmark for Robustness Evaluation of Language Models},\n author={Boxin Wang and Chejian Xu and Shuohang Wang and Zhe Gan and Yu Cheng and Jianfeng Gao and Ahmed Hassan Awadallah and B. Li},\n journal={ArXiv},\n year={2021},\n volume={abs/2111.02840}\n}\n", "homepage": "https://adversarialglue.github.io/", "license": "", "features": {"sentence1": {"dtype": "string", "id": null, "_type": "Value"}, "sentence2": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["entailment", "not_entailment"], "id": null, "_type": "ClassLabel"}, "idx": {"dtype": "int32", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "adv_glue", "config_name": "adv_rte", "version": {"version_str": "1.0.0", "description": "", "major": 1, "minor": 0, "patch": 0}, "splits": {"validation": {"name": "validation", "num_bytes": 25998, "num_examples": 81, "dataset_name": "adv_glue"}}, "download_checksums": {"https://adversarialglue.github.io/dataset/dev.zip": {"num_bytes": 40662, "checksum": "efb4cbd3aa4a87bfaffc310ae951981cc0a36c6c71c6425dd74e5b55f2f325c9"}}, "download_size": 40662, "post_processing_size": null, "dataset_size": 25998, "size_in_bytes": 66660}}
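
The deleted `dataset_infos.json` is a single JSON object keyed by config name. A short sketch that tabulates the per-config validation split sizes recorded above (assuming a local copy of the file):

```python
import json

# Assumes a local copy of the dataset_infos.json shown above.
with open("dataset_infos.json") as f:
    infos = json.load(f)

for config, info in infos.items():
    split = info["splits"]["validation"]
    print(f"{config}: {split['num_examples']} examples, {split['num_bytes']} bytes")
```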