---
annotations_creators:
- expert-generated
language_creators:
- expert-generated
language:
- en
multilinguality:
- monolingual
size_categories:
- n<1K
source_datasets:
- original
task_categories:
- multiple-choice
- text-generation
- question-answering
task_ids:
- multiple-choice-qa
- language-modeling
- open-domain-qa
pretty_name: BBEH Disambiguation QA
dataset_info:
- config_name: generation
  features:
  - name: question
    dtype: string
  - name: best_answer
    dtype: string
  splits:
  - name: validation
    num_examples: 120
- config_name: multiple-choice
  features:
  - name: question
    dtype: string
  - name: mc1_targets
    struct:
    - name: choices
      sequence: string
    - name: labels
      sequence: int32
  splits:
  - name: validation
    num_examples: 120
configs:
- config_name: generation
  data_files:
  - split: validation
    path: generation/bbeh-disambiguation-qa-generation.parquet
- config_name: multiple-choice
  data_files:
  - split: validation
    path: multiple-choice/bbeh-disambiguation-qa-multiple-choice.parquet
---
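
BBEH Disambiguation QA is the disambiguation task from the BIG-Bench Extra Hard (BBEH) suite. It comes in two configurations: `generation`, where each example pairs a `question` with a single `best_answer` string, and `multiple-choice`, where each example pairs a `question` with an `mc1_targets` struct holding parallel `choices` (strings) and integer `labels`. Both configurations have a single `validation` split of 120 examples. The sketch below shows one way to load them with the `datasets` library; the repository ID is a placeholder, so substitute the dataset's actual path on the Hub.

```python
from datasets import load_dataset

# Placeholder repository ID: replace with this dataset's actual path on the Hugging Face Hub.
REPO_ID = "<org>/bbeh-disambiguation-qa"

# Generation config: rows of {"question": str, "best_answer": str}.
generation = load_dataset(REPO_ID, "generation", split="validation")

# Multiple-choice config: rows of {"question": str,
# "mc1_targets": {"choices": [str, ...], "labels": [int, ...]}}.
multiple_choice = load_dataset(REPO_ID, "multiple-choice", split="validation")

print(len(generation))                               # 120 validation examples
print(generation[0]["question"])                     # prompt text
print(multiple_choice[0]["mc1_targets"]["choices"])  # candidate answers
```
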
## Reference

@article{kazemi2025big,
  title={Big-bench extra hard},
  author={Kazemi, Mehran and Fatemi, Bahare and Bansal, Hritik and Palowitch, John and Anastasiou, Chrysovalantis and Mehta, Sanket Vaibhav and Jain, Lalit K and Aglietti, Virginia and Jindal, Disha and Chen, Peter and others},
  journal={arXiv preprint arXiv:2502.19187},
  year={2025}
}