Datasets:
Tasks:
Text Generation
Formats:
parquet
Sub-tasks:
language-modeling
Languages:
Danish
Size:
1M - 10M
License:
File size: 619 Bytes
7ed5f43 78108d3 7ed5f43 78108d3 7ed5f43 78108d3 7ed5f43 78108d3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 |
from pathlib import Path
from datasets import load_dataset
def test_dataset_loads():
    """Smoke test: the repository loads as a dataset and yields a record.

    Resolves the repo root (two levels above this test file), streams the
    `train` split from it, and checks that the first streamed sample is a
    plain dict.
    """
    # The dataset repo root is two directories above this test module.
    repo_root = Path(__file__).parent.parent
    dataset_path = str(repo_root.resolve())
    # Streaming avoids downloading/materializing the full split just to
    # verify that loading works.
    dataset = load_dataset(dataset_path, split="train", streaming=True)
    first_record = next(iter(dataset))
    assert isinstance(first_record, dict)
# def test_all_datasets_in_yaml(repo_path: Path, readme_yaml_header: dict[str, Any]):
# configs = readme_yaml_header["configs"]
# data_folder = repo_path / "data"
# datasets = data_folder.glob("*")
# for dataset in datasets:
# assert dataset in configs
|