Tasks: Text Generation · Formats: parquet · Sub-tasks: language-modeling · Languages: Danish · Size: 1M - 10M

Kenneth Enevoldsen committed: reformatted tests for datasheets
Files changed:

- data/wiki/descriptive_stats.json +1 -2
- data/wiki/images/dist_document_length.png +2 -2
- data/wiki/wiki.md +12 -13
- src/dynaword/datasheet.py +4 -2
- src/dynaword/tables.py +1 -2
- src/dynaword/update_descriptive_statistics.py +3 -2
- src/tests/test_dataset_schema.py +0 -26
- src/tests/test_datasheets.py +47 -0
data/wiki/descriptive_stats.json
CHANGED

```diff
@@ -2,6 +2,5 @@
     "number_of_samples": 264433,
     "average_document_length": 1386.9819878759458,
     "number_of_tokens": 122002149,
-    "
-    "revision": "341a0162905ca4848fc840d941edfd0a6d1b2314"
+    "revision": "8000194fcf5dfc965cf377344a0acbe6fc8df8af"
 }
```
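Only the `revision` field changes: the statistics were recomputed against a newer commit of the source data and the counts came out identical. As a hedged sketch of how the fields in this file relate to the underlying documents (the real logic lives in `src/dynaword/update_descriptive_statistics.py`; the helper below is illustrative only):

```python
# Illustrative only -- the actual computation is in
# src/dynaword/update_descriptive_statistics.py and may differ.
import json


def descriptive_stats(texts: list[str], token_counts: list[int], revision: str) -> dict:
    return {
        "number_of_samples": len(texts),
        "average_document_length": sum(len(t) for t in texts) / len(texts),
        "number_of_tokens": sum(token_counts),
        "revision": revision,  # git commit the stats were computed from
    }


print(json.dumps(descriptive_stats(["en kort tekst"], [4], "8000194"), indent=4))
```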
data/wiki/images/dist_document_length.png
CHANGED

(Binary file tracked with Git LFS; updated document-length distribution plot, image diff not shown.)
data/wiki/wiki.md
CHANGED

```diff
@@ -1,20 +1,20 @@
 ---
 pretty_name: Wikipedia
 language:
-
+- da
 license: cc0-1.0
 license_name: CC-0
 size_categories:
-
+- 100k-1M
 task_categories:
-
-
+- text-generation
+- fill-mask
 task_ids:
-
+- language-modeling
 source_datasets:
-
+- danish-foundation-models/danish-gigaword
 domains:
-
+- Encyclopedic
 ---
 
 # Dataset Card for Wikipedia
@@ -31,6 +31,7 @@ You can read more about wikipedia on their [about](https://en.wikipedia.org/wiki
 
 <!-- START-DESC-STATS -->
 - **Language**: dan, dansk, Danish
+- **Domains**: ['Encyclopedic']
 - **Number of samples**: 264.43K
 - **Number of tokens (Llama 3)**: 122.00M
 - **Average document length (characters)**: 1386.98
@@ -59,22 +60,20 @@ An example from the dataset looks as follows.
 
 An entry in the dataset consists of the following fields:
 
+- `id` (`str`): An unique identifier for each document.
 - `text`(`str`): The content of the document.
 - `source` (`str`): The source of the document (see [Source Data](#source-data)).
-- `id` (`str`): An unique identifier for each document.
 - `added` (`str`): An date for when the document was added to this collection.
 - `created` (`str`): An date range for when the document was originally created.
-- `
-- `domain` (`str`): The domain of the source
-- `metadata/source-pretty` (`str`): The long form version of the short-form source name
-- `metadata/*`: Potentially additional metadata
+- `token_count` (`int`): The number of tokens in the sample computed using the Llama 8B tokenizer
 <!-- END-SAMPLE -->
 
 ### Dataset Statistics
 
 <!-- START-DATASET PLOTS -->
+<p align="center">
 <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
-
+</p>
 <!-- END-DATASET PLOTS -->
 
 
```
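The frontmatter values that had been dropped are restored here, and the field list is brought in line with the actual schema. A quick way to sanity-check the restored YAML header, assuming PyYAML is installed (this snippet is not part of the repo):

```python
# Assumption: PyYAML is available; this check is illustrative, not a repo utility.
from pathlib import Path

import yaml

md = Path("data/wiki/wiki.md").read_text(encoding="utf-8")
frontmatter = yaml.safe_load(md.split("---")[1])  # text between the first two "---" fences
assert frontmatter["language"] == ["da"]
assert frontmatter["task_ids"] == ["language-modeling"]
assert frontmatter["domains"] == ["Encyclopedic"]
```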
src/dynaword/datasheet.py
CHANGED

```diff
@@ -50,6 +50,7 @@ An entry in the dataset consists of the following fields:
 - `token_count` (`int`): The number of tokens in the sample computed using the Llama 8B tokenizer
 """
 
+
 def human_readable_large_int(value: int) -> str:
     thresholds = [
         (1_000_000_000, "B"),
@@ -127,13 +128,14 @@ class DataSheet(BaseModel):
 
             if next_is_end_section:
                 end_header = _header
+                break
 
         if next_is_end_section is None:
             raise ValueError(f"The header '{header}' is not found in the text.")
 
         start_idx = self.body.find(header)
         if end_header:
-            end_idx = self.body.find(end_header)
+            end_idx = self.body[start_idx:].find(end_header) + start_idx
         else:
             end_idx = len(self.body)
 
@@ -149,7 +151,7 @@
             for level in levels:
                 if text.startswith("#" * level):
                     return True
-            return
+            return False
 
         return [line for line in self.body.splitlines() if __contains_level(line)]
 
```
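Three fixes land in this file: the header scan now `break`s at the first end header; the end-of-section search starts at `start_idx`, so an end header that also occurs *before* the target section can no longer produce an inverted slice; and `__contains_level` returns an explicit `False` rather than an implicit `None`, matching its `-> bool` annotation. A standalone sketch of the `start_idx` failure mode (toy strings, not the real class):

```python
body = "## Usage\nIntro.\n## License\nCC-0\n## Usage\nExamples."
header, end_header = "## License", "## Usage"  # end header also occurs before the section

start_idx = body.find(header)
buggy_end = body.find(end_header)                          # 0: finds the first "## Usage"
fixed_end = body[start_idx:].find(end_header) + start_idx  # searches only after start_idx

assert buggy_end < start_idx  # the old code would slice body[16:0] -> empty section
assert body[start_idx:fixed_end] == "## License\nCC-0\n"
```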
src/dynaword/tables.py
CHANGED

```diff
@@ -6,10 +6,9 @@ from dynaword.datasheet import DataSheet, human_readable_large_int
 from dynaword.paths import repo_path
 
 main_sheet = DataSheet.load_from_path(repo_path / "README.md")
-frontmatter, _ = main_sheet.frontmatter
 _datasets = [
     cfg["config_name"]  # type: ignore
-    for cfg in frontmatter["configs"]  # type: ignore
+    for cfg in main_sheet.frontmatter["configs"]  # type: ignore
     if cfg["config_name"] != "default"  # type: ignore
 ]
 
```
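The module-level tuple unpacking disappears; `DataSheet.frontmatter` is now indexed directly. The same refactor recurs in `update_descriptive_statistics.py` below. A sketch of the implied API change, with the old and new signatures inferred from this diff rather than shown in the commit:

```python
# Assumed shape: DataSheet.frontmatter now returns the parsed YAML header as a dict.
from dynaword.datasheet import DataSheet
from dynaword.paths import repo_path

main_sheet = DataSheet.load_from_path(repo_path / "README.md")
# old (inferred): frontmatter, _ = main_sheet.frontmatter   # tuple of (mapping, body)
configs = main_sheet.frontmatter["configs"]                 # new: mapping indexed directly
```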
src/dynaword/update_descriptive_statistics.py
CHANGED

```diff
@@ -27,10 +27,9 @@ from dynaword.tables import create_overview_table_str
 logger = logging.getLogger(__name__)
 
 main_sheet = DataSheet.load_from_path(repo_path / "README.md")
-frontmatter, _ = main_sheet.frontmatter
 _datasets = [
     cfg["config_name"]  # type: ignore
-    for cfg in frontmatter["configs"]  # type: ignore
+    for cfg in main_sheet.frontmatter["configs"]  # type: ignore
     if cfg["config_name"] != "default"  # type: ignore
 ]
 
@@ -81,6 +80,8 @@ def update_dataset(
     package = create_overview_table_str()
     sheet.body = sheet.replace_tag(package=package, tag="MAIN TABLE")
 
+    sheet.write_to_path()
+
 
 def create_parser():
     parser = argparse.ArgumentParser(
```
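Besides the same frontmatter refactor as in `tables.py`, the substantive fix here is `sheet.write_to_path()`: previously the regenerated overview table was only assigned to `sheet.body` in memory and never persisted. A minimal sketch of the intended flow, using only names visible in this diff (the surrounding argument handling is omitted):

```python
# Sketch, not the real function body; names taken from the diff above.
from dynaword.datasheet import DataSheet
from dynaword.paths import repo_path
from dynaword.tables import create_overview_table_str

sheet = DataSheet.load_from_path(repo_path / "README.md")
sheet.body = sheet.replace_tag(package=create_overview_table_str(), tag="MAIN TABLE")
sheet.write_to_path()  # without this call the updated table never reached disk
```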
src/tests/test_dataset_schema.py
CHANGED

```diff
@@ -19,32 +19,6 @@ def test_sample_schema(dataset_name: str):
     SampleSchema(**sample)
 
 
-@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
-def test_dataset_readme(dataset_name: str):
-    """tests that the dataset frontmatter and markdown follows the correct format."""
-
-    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
-
-    # ensure that it can be loaded as a datasheet
-    ds_sheet = DataSheet.load_from_path(readme)  # fill fail if format is not correct
-
-    # ensure tags:
-    body = ds_sheet.body
-    tags = [v.value for v in DEFAULT_SECTION_TAGS]
-    for tag in tags:
-        ds_sheet.get_tag_idx(tag)
-
-    h2_headings = {line for line in body.splitlines() if line.startswith("## ")}
-
-    if ds_sheet.license == "other":  # ensure description of underspecified licenses
-        assert "## License Information" in h2_headings
-
-    # required headings
-    req_h2_headings = ["## Dataset Description", "## Additional Information"]
-    for req_h2 in req_h2_headings:
-        assert req_h2 in h2_headings
-    pass
-
 
 @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
 def test_dataset_folder_structure(dataset_name: str):
```
src/tests/test_datasheets.py
ADDED

```diff
@@ -0,0 +1,47 @@
+import pytest
+
+from dynaword.datasheet import DEFAULT_SECTION_TAGS, DataSheet
+from dynaword.paths import repo_path
+
+from .conftest import DATASET_NAMES
+
+
+@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
+def test_datasheet_load(dataset_name: str):
+    """tests that the dataset frontmatter and markdown follows the correct format."""
+
+    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
+    ds_sheet = DataSheet.load_from_path(readme)  # will fail if format is not correct
+
+
+@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
+def test_datasheet_content_tags(dataset_name: str):
+    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
+    ds_sheet = DataSheet.load_from_path(readme)
+
+    # ensure tags:
+    body = ds_sheet.body
+    tags = [v.value for v in DEFAULT_SECTION_TAGS]
+    for tag in tags:
+        ds_sheet.get_tag_idx(tag)
+
+
+@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
+def test_datasheet_license_info(dataset_name: str):
+    """Ensure that license information is present is license is other"""
+    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
+    ds_sheet = DataSheet.load_from_path(readme)
+
+    if ds_sheet.license == "other":  # ensure description of underspecified licenses
+        assert ds_sheet.license_information.strip()
+        assert ds_sheet.license_name
+
+
+@pytest.mark.parametrize("dataset_name", DATASET_NAMES)
+def test_datasheet_required_headings(dataset_name: str):
+    readme = repo_path / "data" / dataset_name / f"{dataset_name}.md"
+    ds_sheet = DataSheet.load_from_path(readme)
+
+    req_h2_headings = ["## Dataset Description", "## Additional Information"]
+    for req_h2 in req_h2_headings:
+        assert ds_sheet.get_section_by_header(req_h2)
```
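This new file replaces the monolithic `test_dataset_readme` removed from `test_dataset_schema.py`: each requirement (loadability, section tags, license information, required headings) is now its own parametrized test, so a failure names both the broken requirement and the offending dataset. Assuming a standard pytest setup with the repo root as the working directory, the new checks can be exercised in isolation:

```python
# Assumes pytest is installed and run from the repository root.
import subprocess

subprocess.run(["pytest", "src/tests/test_datasheets.py", "-v"], check=True)
```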