{
  "@context": {
    "@language": "en",
    "@vocab": "https://schema.org/",
    "citeAs": "cr:citeAs",
    "column": "cr:column",
    "conformsTo": "dct:conformsTo",
    "cr": "http://mlcommons.org/croissant/",
    "rai": "http://mlcommons.org/croissant/RAI/",
    "data": {
      "@id": "cr:data",
      "@type": "@json"
    },
    "dataType": {
      "@id": "cr:dataType",
      "@type": "@vocab"
    },
    "dct": "http://purl.org/dc/terms/",
    "examples": {
      "@id": "cr:examples",
      "@type": "@json"
    },
    "extract": "cr:extract",
    "field": "cr:field",
    "fileProperty": "cr:fileProperty",
    "fileObject": "cr:fileObject",
    "fileSet": "cr:fileSet",
    "format": "cr:format",
    "includes": "cr:includes",
    "isLiveDataset": "cr:isLiveDataset",
    "jsonPath": "cr:jsonPath",
    "key": "cr:key",
    "md5": "cr:md5",
    "parentField": "cr:parentField",
    "path": "cr:path",
    "recordSet": "cr:recordSet",
    "references": "cr:references",
    "regex": "cr:regex",
    "repeated": "cr:repeated",
    "replace": "cr:replace",
    "sc": "https://schema.org/",
    "separator": "cr:separator",
    "source": "cr:source",
    "subField": "cr:subField",
    "transform": "cr:transform"
  },
  "@type": "sc:Dataset",
  "name": "seqBench",
  "description": "\nSeqBench is a tunable benchmark designed to probe and analyze sequential reasoning \ncapabilities in language models. This compact version contains instances with varied \ncomplexity. Each instance provides:\n- 'context': NLP problem.\n- 'completion': Solution.\n- 'complexity_parameters': A dictionary with L, B, N.\n- 'instance_metadata': Maze dimensions and agent/target names.\n- 'structural_details': Rich structural information (as a JSON string) including room mappings, \n adjacency lists, door/key details, and canonical facts for the underlying base maze.\n",
  "conformsTo": "http://mlcommons.org/croissant/1.0",
  "citeAs": "@misc{anonymous2025seqbench,\n  author = {Anonymous Submission},\n  title = {SeqBench: A Tunable Benchmark to Quantify Sequential Reasoning Limits of LLMs},\n  year = {2025},\n  publisher = {Proceedings of the Conference on Empirical Methods in Natural Language Processing},\n  note = {Special Theme: Interdisciplinary Recontextualization of NLP},\n  comment = {Dataset accessible at https://huggingface.co/datasets/emnlp-submission/seqBench}\n}",
  "license": "https://creativecommons.org/licenses/by/4.0/",
  "url": "https://huggingface.co/datasets/emnlp-submission/seqBench",
  "distribution": [
    {
      "@type": "cr:FileObject",
      "@id": "seqbench-jsonl-gz-file",
      "name": "seqbench-jsonl-gz-file",
      "description": "The main benchmark data file in gzipped JSONL format.",
      "contentUrl": "https://huggingface.co/datasets/emnlp-submission/seqBench/resolve/main/seqBench_compact.jsonl.gz",
      "encodingFormat": "application/gzip",
      "sha256": "2b58a2f7b65def3445f2572726d5ff30d5ecab9cc841fffcb57a8bceb937b0c3"
    }
  ],
  "recordSet": [
    {
      "@type": "cr:RecordSet",
      "@id": "seqbench-records",
      "name": "seqbench-records",
      "description": "Individual instances from the SeqBench benchmark (extracted from gzipped JSONL).",
      "field": [
        {
          "@type": "cr:Field",
          "@id": "context",
          "name": "context",
          "dataType": "sc:Text",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "column": "context" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "completion",
          "name": "completion",
          "dataType": "sc:Text",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "column": "completion" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "logical_depth_L",
          "name": "logical_depth_L",
          "dataType": "sc:Integer",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "jsonPath": "complexity_parameters.logical_depth_L" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "backtracking_count_B",
          "name": "backtracking_count_B",
          "dataType": "sc:Integer",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "jsonPath": "complexity_parameters.backtracking_count_B" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "noise_ratio_N",
          "name": "noise_ratio_N",
          "dataType": "sc:Float",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "jsonPath": "complexity_parameters.noise_ratio_N" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "instance_id",
          "name": "instance_id",
          "dataType": "sc:Text",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "column": "instance_id" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "full_instance_metadata",
          "name": "full_instance_metadata",
          "description": "Full instance metadata as a JSON string (contains maze_rows, maze_cols, agent_name, target_name).",
          "dataType": "sc:Text",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "column": "instance_metadata" }
          }
        },
        {
          "@type": "cr:Field",
          "@id": "structural_details_json",
          "name": "structural_details_json",
          "description": "Full structural details as a JSON string.",
          "dataType": "sc:Text",
          "source": {
            "fileObject": { "@id": "seqbench-jsonl-gz-file" },
            "extract": { "column": "structural_details" }
          }
        }
      ]
    }
  ]
}
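
For reference, the sketch below shows one way the gzipped JSONL file listed under `distribution` could be read directly with the Python standard library. It is an illustration, not an official loader: the local filename is assumed, and the record keys (`context`, `completion`, `complexity_parameters`, `instance_id`) simply mirror the `recordSet` field definitions above. Croissant-aware tooling (e.g. mlcroissant) can also consume the metadata file itself.

```python
# Minimal sketch: iterate over SeqBench instances from the gzipped JSONL file
# referenced by the Croissant "distribution" entry. Filename is an assumption.
import gzip
import json

def iter_records(path="seqBench_compact.jsonl.gz"):
    """Yield one SeqBench instance (a dict) per JSONL line."""
    with gzip.open(path, "rt", encoding="utf-8") as f:
        for line in f:
            yield json.loads(line)

for record in iter_records():
    context = record["context"]                # natural-language problem statement
    completion = record["completion"]          # reference solution
    params = record["complexity_parameters"]   # logical_depth_L, backtracking_count_B, noise_ratio_N
    print(record["instance_id"], params["logical_depth_L"])
    break  # inspect only the first instance
```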