Files changed (43)
  1. .gitignore +6 -0
  2. .vscode/settings.json +0 -1
  3. CHANGELOG.md +24 -0
  4. CONTRIBUTING.md +6 -1
  5. README.md +67 -30
  6. data/ai-aktindsigt/ai-aktindsigt.md +90 -0
  7. data/ai-aktindsigt/ai-aktindsigt.parquet +3 -0
  8. data/ai-aktindsigt/create.py +64 -0
  9. data/ai-aktindsigt/descriptive_stats.json +7 -0
  10. data/ai-aktindsigt/images/dist_document_length.png +3 -0
  11. data/cellar/cellar.md +82 -0
  12. data/cellar/cellar.parquet +3 -0
  13. data/cellar/create.py +60 -0
  14. data/cellar/descriptive_stats.json +7 -0
  15. data/cellar/images/dist_document_length.png +3 -0
  16. data/danske-taler/create.py +227 -0
  17. data/danske-taler/danske-taler.log +57 -0
  18. data/danske-taler/danske-taler.md +138 -0
  19. data/danske-taler/danske-taler.parquet +3 -0
  20. data/danske-taler/descriptive_stats.json +7 -0
  21. data/danske-taler/images/dist_document_length.png +3 -0
  22. data/eur-lex-sum-da/create.py +50 -0
  23. data/eur-lex-sum-da/descriptive_stats.json +7 -0
  24. data/eur-lex-sum-da/eur-lex-sum-da.md +86 -0
  25. data/eur-lex-sum-da/eur-lex-sum-da.parquet +3 -0
  26. data/eur-lex-sum-da/images/dist_document_length.png +3 -0
  27. data/fm-udgivelser/create.py +50 -0
  28. data/fm-udgivelser/descriptive_stats.json +7 -0
  29. data/fm-udgivelser/fm-udgivelser.md +92 -0
  30. data/fm-udgivelser/fm-udgivelser.parquet +3 -0
  31. data/fm-udgivelser/images/dist_document_length.png +3 -0
  32. data/miljoeportalen/create.py +50 -0
  33. data/miljoeportalen/descriptive_stats.json +7 -0
  34. data/miljoeportalen/images/dist_document_length.png +3 -0
  35. data/miljoeportalen/miljoeportalen.md +103 -0
  36. data/miljoeportalen/miljoeportalen.parquet +3 -0
  37. pyproject.toml +1 -1
  38. src/bump_version.py +2 -1
  39. src/tests/test_dataset_schema.py +1 -1
  40. src/tests/test_load.py +3 -1
  41. src/update_descriptive_statistics.py +32 -9
  42. test_results.log +13 -0
  43. uv.lock +1 -1
.gitignore CHANGED
@@ -14,3 +14,9 @@ tmp.png
 
 # MacOS
 .DS_Store
+
+# tmp files
+tmp.py
+
+## to allow temporary data drops without pushing it to the hub
+data/*/tmp/*
.vscode/settings.json CHANGED
@@ -4,5 +4,4 @@
 ],
 "python.testing.unittestEnabled": false,
 "python.testing.pytestEnabled": true,
-"makefile.configureOnOpen": false
 }
CHANGELOG.md ADDED
@@ -0,0 +1,24 @@
+
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
+
+## [v1.0.11] - 2025-03-29
+
+### Added
+
+- Added new datasets (more than 1B tokens 🎉)
+  - AI Aktindsigt
+  - Cellar
+  - Danske Taler
+  - Miljøportalen
+  - EUR-Lex SUM
+  - Finansministeriets Udgivelser
+
+### Docs
+
+- Sorted main table in readme
+- Added Changelog
+- Minor changes
CONTRIBUTING.md CHANGED
@@ -60,11 +60,14 @@ Creating a PR on Huggingface is a bit different from creating one on Github.
 git fetch origin refs/pr/{PR NUMBER}:pr/{PR NUMBER}
 git checkout pr/{PR NUMBER}
 # make your changes here
+
 # push to hub
+# you might need to log in first:
+# huggingface-cli login
 git push origin pr/{PR NUMBER}:refs/pr/{PR NUMBER}
 ```
 
-Before you make the PR do be sure to make sure that you have completed the following checklist.
+Before you make the PR, be sure that you have completed the following checklist.
 
 ### Checklist
 
@@ -73,6 +76,8 @@ Before you make the PR, be sure that you have completed the following checklist.
 - [ ] I have updated descriptive statistics using `make update-descriptive-statistics`
 - [ ] I have bumped the version using `make bump-version`
 - [ ] If I have added a `create.py` script I have added the [script dependencies](https://docs.astral.sh/uv/guides/scripts/#declaring-script-dependencies) required to run that script.
+- [ ] I have updated the CHANGELOG.md if appropriate
+
 
 ### Examples of Previous PRs
 To see example PRs, see the following:
README.md CHANGED
@@ -5,6 +5,30 @@ configs:
   data_files:
   - split: train
     path: 'data/*/*.parquet'
+- config_name: ai-aktindsigt
+  data_files:
+  - split: train
+    path: data/ai-aktindsigt/*.parquet
+- config_name: cellar
+  data_files:
+  - split: train
+    path: data/cellar/*.parquet
+- config_name: danske-taler
+  data_files:
+  - split: train
+    path: data/danske-taler/*.parquet
+- config_name: eur-lex-sum-da
+  data_files:
+  - split: train
+    path: data/eur-lex-sum-da/*.parquet
+- config_name: miljoeportalen
+  data_files:
+  - split: train
+    path: data/miljoeportalen/*.parquet
+- config_name: fm-udgivelser
+  data_files:
+  - split: train
+    path: data/fm-udgivelser/*.parquet
 - config_name: memo
   data_files:
   - split: train
@@ -134,7 +158,7 @@ https://github.com/huggingface/datasets/blob/main/templates/README_guide.md
 <!-- START README TABLE -->
 | | |
 | ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **Version** | 1.0.10 |
+| **Version** | 1.0.11 ([Changelog](/CHANGELOG.md)) |
 | **Language** | dan, dansk, Danish |
 | **License** | Permissive, see the respective dataset |
 | **Models** | For models trained using this data see [danish-foundation-models](https://huggingface.co/danish-foundation-models) |
@@ -275,34 +299,46 @@ This data generally contains no annotation besides the metadata attached to each
 Below follows a brief overview of the sources in the corpus along with their individual license.
 
 <!-- START-MAIN TABLE -->
-| Source | Description | N. Tokens | License |
-|:--------------------|:-----------------------------------------------------------------------------------------------------------------------------|:------------|:-----------------------|
-| [memo] | The MeMo corpus comprising almost all Danish novels from the period 1870-1899, known as the Modern Breakthrough | 9.28M | [CC-BY-SA 4.0] |
-| [opensubtitles] | Danish subsection of [OpenSubtitles](https://opus.nlpl.eu/OpenSubtitles/corpus/version/OpenSubtitles) | 271.60M | [CC-0] |
-| [retsinformationdk] | [retsinformation.dk](https://www.retsinformation.dk) (legal-information.dk) the official legal information system of Denmark | 516.54M | [Danish Copyright Law] |
-| [ep] | The Danish subsection of [Europarl](https://aclanthology.org/2005.mtsummit-papers.11/) | 100.89M | [CC-0] |
-| [ft] | Records from all meetings of The Danish parliament (Folketinget) in the parliament hall | 114.09M | [CC-0] |
-| [wikisource] | The Danish subsection of [Wikisource](https://en.wikisource.org/wiki/Main_Page) | 5.34M | [CC-0] |
-| [spont] | Conversational samples collected as a part of research projects at Aarhus University | 1.56M | [CC-0] |
-| [tv2r] | Contemporary Danish newswire articles published between 2010 and 2019 | 21.67M | [CC-BY-SA 4.0] |
-| [adl] | Danish literature from 1700-2023 from the Archive for Danish Literature (ADL) | 58.49M | [CC-0] |
-| [hest] | Samples from the Danish debate forum www.heste-nettet.dk | 389.33M | [CC-0] |
-| [skat] | Skat is the Danish tax authority. This dataset contains content from its website skat.dk | 122.12M | [CC-0] |
-| [dannet] | [DanNet](https://cst.ku.dk/projekter/dannet) is a Danish WordNet | 1.52M | [DanNet 1.0 License] |
-| [retspraksis] | Case law or judical practice in Denmark derived from [Retspraksis](https://da.wikipedia.org/wiki/Retspraksis) | 57.08M | [CC-0] |
-| [wikibooks] | The Danish Subsection of [Wikibooks](https://www.wikibooks.org) | 6.24M | [CC-0] |
-| [jvj] | The works of the Danish author and poet, [Johannes V. Jensen](https://da.wikipedia.org/wiki/Johannes_V._Jensen) | 3.55M | [CC-BY-SA 4.0] |
-| [gutenberg] | The Danish subsection from Project [Gutenberg](https://www.gutenberg.org) | 6.76M | [Gutenberg License] |
-| [botxt] | The Bornholmsk Ordbog Dictionary Projec | 847.97K | [CC-0] |
-| [depbank] | The Danish subsection of the [Universal Dependencies Treebank](https://github.com/UniversalDependencies/UD_Danish-DDT) | 185.45K | [CC-BY-SA 4.0] |
-| [naat] | Danish speeches from 1930-2022 | 286.68K | [CC-0] |
-| [synne] | Dataset collected from [synnejysk forening's website](https://www.synnejysk.dk), covering the Danish dialect sønderjysk | 52.51K | [CC-0] |
-| [wiki] | The Danish subsection of [wikipedia](https://en.wikipedia.org/wiki/Main_Page) | 122.00M | [CC-0] |
-| [nordjyllandnews] | Articles from the Danish Newspaper [TV2 Nord](https://www.tv2nord.dk) | 37.91M | [CC-0] |
-| [relig] | Danish religious text from the 1700-2022 | 1.24M | [CC-0] |
-| [nota] | The text only part of the [Nota lyd- og tekstdata](https://sprogteknologi.dk/dataset/nota-lyd-og-tekstdata) dataset | 7.30M | [CC-0] |
-| **Total** | | 1.86B | |
-
+| Source | Description | N. Tokens | License |
+|:--------------------|:----------------------------------------------------------------------------------------------------------------------------------------------|:------------|:-----------------------|
+| [synne] | Dataset collected from [synnejysk forening's website](https://www.synnejysk.dk), covering the Danish dialect sønderjysk | 52.51K | [CC-0] |
+| [depbank] | The Danish subsection of the [Universal Dependencies Treebank](https://github.com/UniversalDependencies/UD_Danish-DDT) | 185.45K | [CC-BY-SA 4.0] |
+| [naat] | Danish speeches from 1930-2022 | 286.68K | [CC-0] |
+| [botxt] | The Bornholmsk Ordbog Dictionary Project | 847.97K | [CC-0] |
+| [relig] | Danish religious texts from 1700-2022 | 1.24M | [CC-0] |
+| [dannet] | [DanNet](https://cst.ku.dk/projekter/dannet) is a Danish WordNet | 1.52M | [DanNet 1.0 License] |
+| [spont] | Conversational samples collected as a part of research projects at Aarhus University | 1.56M | [CC-0] |
+| [jvj] | The works of the Danish author and poet, [Johannes V. Jensen](https://da.wikipedia.org/wiki/Johannes_V._Jensen) | 3.55M | [CC-BY-SA 4.0] |
+| [wikisource] | The Danish subsection of [Wikisource](https://en.wikisource.org/wiki/Main_Page) | 5.34M | [CC-0] |
+| [wikibooks] | The Danish Subsection of [Wikibooks](https://www.wikibooks.org) | 6.24M | [CC-0] |
+| [gutenberg] | The Danish subsection from Project [Gutenberg](https://www.gutenberg.org) | 6.76M | [Gutenberg License] |
+| [nota] | The text only part of the [Nota lyd- og tekstdata](https://sprogteknologi.dk/dataset/nota-lyd-og-tekstdata) dataset | 7.30M | [CC-0] |
+| [danske-taler] | Danish Speeches from [dansketaler.dk](https://www.dansketaler.dk) | 8.23M | [CC-0] |
+| [memo] | The MeMo corpus comprising almost all Danish novels from the period 1870-1899, known as the Modern Breakthrough | 9.28M | [CC-BY-SA 4.0] |
+| [tv2r] | Contemporary Danish newswire articles published between 2010 and 2019 | 21.67M | [CC-BY-SA 4.0] |
+| [eur-lex-sum-da] | The Danish subsection of EUR-lex SUM consisting of EU legislation paired with professionally written summaries | 31.37M | [CC-BY-SA 4.0] |
+| [nordjyllandnews] | Articles from the Danish Newspaper [TV2 Nord](https://www.tv2nord.dk) | 37.91M | [CC-0] |
+| [fm-udgivelser] | The official publication series of the Danish Ministry of Finance containing economic analyses, budget proposals, and fiscal policy documents | 50.34M | [CC-BY-SA 4.0] |
+| [retspraksis] | Case law or judicial practice in Denmark derived from [Retspraksis](https://da.wikipedia.org/wiki/Retspraksis) | 57.08M | [CC-0] |
+| [adl] | Danish literature from 1700-2023 from the Archive for Danish Literature (ADL) | 58.49M | [CC-0] |
+| [ep] | The Danish subsection of [Europarl](https://aclanthology.org/2005.mtsummit-papers.11/) | 100.89M | [CC-0] |
+| [ft] | Records from all meetings of The Danish parliament (Folketinget) in the parliament hall | 114.09M | [CC-0] |
+| [wiki] | The Danish subsection of [wikipedia](https://en.wikipedia.org/wiki/Main_Page) | 122.00M | [CC-0] |
+| [skat] | Skat is the Danish tax authority. This dataset contains content from its website skat.dk | 122.12M | [CC-0] |
+| [miljoeportalen] | Data from [Danmarks Miljøportalen](https://www.miljoeportal.dk/om-danmarks-miljoeportal/) (Denmark's Environment Portal) | 128.48M | [CC-0] |
+| [ai-aktindsigt] | Multiple web scrapes from municipality websites collected as a part of the [AI-aktindsigt](https://ai-aktindsigt.dk) project | 139.23M | [Apache License 2.0] |
+| [opensubtitles] | Danish subsection of [OpenSubtitles](https://opus.nlpl.eu/OpenSubtitles/corpus/version/OpenSubtitles) | 271.60M | [CC-0] |
+| [hest] | Samples from the Danish debate forum www.heste-nettet.dk | 389.33M | [CC-0] |
+| [retsinformationdk] | [retsinformation.dk](https://www.retsinformation.dk) (legal-information.dk) the official legal information system of Denmark | 516.54M | [Danish Copyright Law] |
+| [cellar] | The official digital repository for European Union legal documents and open data | 1.28B | [CC-BY-SA 4.0] |
+| **Total** | | 3.49B | |
+
+[ai-aktindsigt]: data/ai-aktindsigt/ai-aktindsigt.md
+[cellar]: data/cellar/cellar.md
+[danske-taler]: data/danske-taler/danske-taler.md
+[eur-lex-sum-da]: data/eur-lex-sum-da/eur-lex-sum-da.md
+[miljoeportalen]: data/miljoeportalen/miljoeportalen.md
+[fm-udgivelser]: data/fm-udgivelser/fm-udgivelser.md
 [memo]: data/memo/memo.md
 [opensubtitles]: data/opensubtitles/opensubtitles.md
 [retsinformationdk]: data/retsinformationdk/retsinformationdk.md
@@ -331,13 +367,14 @@ Below follows a brief overview of the sources in the corpus along with their individual license.
 
 [CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en
 [CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en
+[Apache License 2.0]: https://www.apache.org/licenses/LICENSE-2.0
 [Danish Copyright Law]: ./data/retsinformationdk/retsinformationdk.md#license-information
 [DanNet 1.0 License]: ./data/dannet/dannet.md#license-information
 [Gutenberg License]: ./data/gutenberg/gutenberg.md#license-information
 <!-- END-MAIN TABLE -->
 
 
-You can learn more about each dataset by pressing
+You can learn more about each dataset by following its link.
 
 <!-- ### Quality Control
 
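The `config_name` entries added above make each source loadable on its own. A minimal sketch of what that enables; the hub id `danish-foundation-models/danish-dynaword` is an assumption inferred from the project paths in this PR:

```py
# Hypothetical usage of the configs added above; the hub id is an assumption.
from datasets import load_dataset

danske_taler = load_dataset(
    "danish-foundation-models/danish-dynaword", name="danske-taler", split="train"
)
print(danske_taler[0]["source"])  # "danske-taler"
```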
data/ai-aktindsigt/ai-aktindsigt.md ADDED
@@ -0,0 +1,90 @@
+---
+pretty_name: AI Aktindsigt
+language:
+- da
+license: apache-2.0
+license_name: Apache License 2.0
+task_categories:
+- text-generation
+- fill-mask
+task_ids:
+- language-modeling
+source_datasets:
+- AI-aktindsigt/Skrabet_kommunale_hjemmesider
+---
+
+# Dataset Card for AI Aktindsigt
+
+<!-- START-SHORT DESCRIPTION -->
+Multiple web scrapes from municipality websites collected as a part of the [AI-aktindsigt](https://ai-aktindsigt.dk) project.
+<!-- END-SHORT DESCRIPTION -->
+
+The dataset consists of multiple scrapes of municipal websites compiled in connection with the work on the [AI-aktindsigt](https://ai-aktindsigt.dk) project. The scrape covers different domains from several different municipalities.
+
+## Dataset Description
+
+<!-- START-DESC-STATS -->
+- **Language**: dan, dansk, Danish
+- **Number of samples**: 200.91K
+- **Number of tokens (Llama 3)**: 139.23M
+- **Average document length (characters)**: 2030.75
+<!-- END-DESC-STATS -->
+
+## Dataset Structure
+An example from the dataset looks as follows.
+
+<!-- START-SAMPLE -->
+```py
+{
+    "text": "Vallensbæk Stationstorv 100 2665 Vallensbæk Strand Telefon: +45 4797 4000",
+    "source": "ai-aktindsigt",
+    "id": "ai-aktindsigt_0",
+    "added": "2025-03-24",
+    "created": "2010-01-01, 2024-03-18",
+    "license": "Apache-2.0",
+    "domain": "Web",
+    "metadata": {
+        "source-pretty": "AI Aktindsigt"
+    }
+}
+```
+
+### Data Fields
+
+An entry in the dataset consists of the following fields:
+
+- `text` (`str`): The content of the document.
+- `source` (`str`): The source of the document (see [Source Data](#source-data)).
+- `id` (`str`): A unique identifier for each document.
+- `added` (`str`): A date for when the document was added to this collection.
+- `created` (`str`): A date range for when the document was originally created.
+- `license` (`str`): The license of the document. The licenses vary according to the source.
+- `domain` (`str`): The domain of the source.
+- `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
+- `metadata/*`: Potentially additional metadata.
+<!-- END-SAMPLE -->
+
+### Dataset Statistics
+
+<!-- START-DATASET PLOTS -->
+<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
+<!-- END-DATASET PLOTS -->
+
+## Additional Information
+
+### Source Data
+This dataset is derived from [`AI-aktindsigt/Skrabet_kommunale_hjemmesider`](https://huggingface.co/datasets/AI-aktindsigt/Skrabet_kommunale_hjemmesider/tree/main).
+
+### Citation Information
+
+No citation is applicable for this work. We recommend citing the huggingface repository.
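A note on the `created` field shown in the card above: it is a plain "start, end" date-range string rather than a structured type. A minimal, hypothetical sketch of parsing it; the `parse_created` helper is invented for illustration and is not project code:

```py
# Hypothetical helper for the "YYYY-MM-DD, YYYY-MM-DD" form used in this card;
# note that some sources (e.g. danske-taler) use full ISO timestamps instead.
from datetime import date


def parse_created(created: str) -> tuple[date, date]:
    start, end = (date.fromisoformat(p.strip()) for p in created.split(","))
    return start, end


print(parse_created("2010-01-01, 2024-03-18"))
# (datetime.date(2010, 1, 1), datetime.date(2024, 3, 18))
```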
data/ai-aktindsigt/ai-aktindsigt.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b395bdf2fe3c9f7beb8b7073a1aea72952237e2a63965302ebd199ca46af632a
+size 213799195
data/ai-aktindsigt/create.py ADDED
@@ -0,0 +1,64 @@
+# /// script
+# requires-python = ">=3.12"
+# dependencies = [
+#     "datasets>=3.2.0",
+# ]
+# ///
+"""
+This script is used to create the data for the AI-aktindsigt project.
+
+It derives the data from a .jsonl.gz file.
+"""
+
+from pathlib import Path
+from typing import cast
+
+from datasets import Dataset, load_dataset
+
+source = "ai-aktindsigt"
+
+
+def convert_sample(example):
+    # Example of an input sample:
+    # {'text': 'Vallensbæk Stationstorv 100 2665 Vallensbæk Strand Telefon: +45 4797 4000',
+    #  'id': '0_03fe7662f6d37df0ffbf5013907414f935350db9931043891a95ed830965a507a7bcb4df93741429bdfa4958cf25f6c273aa73146f2be80948f767eb5fa04645',
+    #  'source': 'AI-aktindsigt',
+    #  'added': '2024-04-16T12:35:52.000Z',
+    #  'metadata': {'url': 'https://vallensbaek.dk/', 'kommune': 'vallensbaek', 'sentence': 1,
+    #               'ppl_score': [634.6341],
+    #               'sha512': '03fe7662f6d37df0ffbf5013907414f935350db9931043891a95ed830965a507a7bcb4df93741429bdfa4958cf25f6c273aa73146f2be80948f767eb5fa04645'}
+    # }
+
+    new_example = dict(
+        text_new=example["text"],
+        source=source,
+        domain="Web",
+        license="Apache-2.0",
+        added="2025-03-24",
+        created="2010-01-01, 2024-03-18",  # start date is an approximate guess; end date is the date of the last update
+        metadata={"source-pretty": "AI Aktindsigt"},
+    )
+
+    return new_example
+
+
+def main():
+    data_path = Path(
+        "/work/dfm-data/pre-training/ai_aktindsigt/documents/ai_aktindsigt.jsonl.gz"
+    )
+    ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
+
+    ds = cast(Dataset, ds)
+
+    ds = ds.map(convert_sample, remove_columns=ds.column_names)
+    ds = ds.rename_columns({"text_new": "text"})
+    ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))])  # type: ignore
+    ds = ds.select_columns(
+        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
+    )
+
+    save_path = Path(__file__).parent / f"{source}.parquet"
+    ds.to_parquet(save_path)
+
+
+if __name__ == "__main__":
+    main()
data/ai-aktindsigt/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
+{
+    "number_of_samples": 200914,
+    "average_document_length": 2030.7490916511542,
+    "number_of_tokens": 139234696,
+    "language": "dan, dansk, Danish",
+    "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
+}
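These per-dataset statistics files are produced by `src/update_descriptive_statistics.py` (also touched in this PR). A rough sketch of how the first three numbers could be recomputed; the tokenizer id `meta-llama/Meta-Llama-3-8B` is an assumption based on the "(Llama 3)" label in the dataset cards, and that model is gated on the Hub:

```py
# Rough sketch, not the project's actual implementation.
import pandas as pd
from transformers import AutoTokenizer

df = pd.read_parquet("data/ai-aktindsigt/ai-aktindsigt.parquet")
tok = AutoTokenizer.from_pretrained("meta-llama/Meta-Llama-3-8B")  # assumed; gated

stats = {
    "number_of_samples": len(df),
    "average_document_length": float(df["text"].str.len().mean()),
    "number_of_tokens": sum(len(ids) for ids in tok(df["text"].tolist())["input_ids"]),
}
print(stats)
```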
data/ai-aktindsigt/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: ffd9fd1eec77175d6957fe7efff104e37a2343f24ce521300c479b8e448e023e
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/cellar/cellar.md ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: Cellar
3
+ language:
4
+ - da
5
+ license: cc-by-sa-4.0
6
+ license_name: CC-BY-SA 4.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Finansministeriets Udgivelser
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ The official digital repository for European Union legal documents and open data.
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ The EU Dataset Cellar serves as the central access point for all official EU publications, legislation, and open data resources. Maintained by the Publications Office of the European Union, this comprehensive digital archive contains millions of documents in multiple languages, including regulations, directives, decisions, treaties, case law, and preparatory acts dating back decades. The repository employs standardized metadata and unique identifiers to organize its vast collection, making it an essential resource for researchers, legal professionals, policymakers, and citizens seeking authoritative information on EU law and policy. The Cellar's linked data architecture also enables sophisticated search capabilities and integration with other information systems across the European Union's digital landscape.
21
+
22
+
23
+ ## Dataset Description
24
+
25
+ <!-- START-DESC-STATS -->
26
+ - **Language**: dan, dansk, Danish
27
+ - **Number of samples**: 65.74K
28
+ - **Number of tokens (Llama 3)**: 1.28B
29
+ - **Average document length (characters)**: 64221.30
30
+ <!-- END-DESC-STATS -->
31
+
32
+
33
+ ## Dataset Structure
34
+ An example from the dataset looks as follows.
35
+
36
+
37
+ <!-- START-SAMPLE -->
38
+ ```py
39
+ {
40
+ "text": "\n\n\n\n© Европейски съюз, 2017 г.\n\nВъзпроизвеждането е разрешено при позоваване на оригинала.\n\n© Unión [...]",
41
+ "source": "cellar",
42
+ "id": "cellar_0",
43
+ "added": "2025-03-25",
44
+ "created": "2024-01-01, 2026-01-01",
45
+ "license": "cc-by-sa-4.0",
46
+ "domain": "Legal",
47
+ "metadata": {
48
+ "source-pretty": "Cellar"
49
+ }
50
+ }
51
+ ```
52
+
53
+ ### Data Fields
54
+
55
+ An entry in the dataset consists of the following fields:
56
+
57
+ - `text`(`str`): The content of the document.
58
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
59
+ - `id` (`str`): An unique identifier for each document.
60
+ - `added` (`str`): An date for when the document was added to this collection.
61
+ - `created` (`str`): An date range for when the document was originally created.
62
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
63
+ - `domain` (`str`): The domain of the source
64
+ - `metadata/source-pretty` (`str`): The long form version of the short-form source name
65
+ - `metadata/*`: Potentially additional metadata
66
+ <!-- END-SAMPLE -->
67
+
68
+
69
+ ### Dataset Statistics
70
+
71
+ <!-- START-DATASET PLOTS -->
72
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
73
+ <img>
74
+ <!-- END-DATASET PLOTS -->
75
+
76
+
77
+
78
+ ## Additional Information
79
+
80
+ ### Citation Information
81
+
82
+ No citation is applicable for this work.
data/cellar/cellar.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:90fb4cadd0d6ab84c7e1c6e9029210e0bfadaa1d9a503240067d4069d38b9bcd
+size 1433372916
data/cellar/create.py ADDED
@@ -0,0 +1,60 @@
+# /// script
+# requires-python = ">=3.12"
+# dependencies = [
+#     "datasets>=3.2.0",
+# ]
+# ///
+
+from pathlib import Path
+from typing import cast
+
+from datasets import Dataset, concatenate_datasets, load_dataset
+
+source = "cellar"
+
+
+def convert_sample(example):
+    new_example = dict(
+        text_new=example["text"],
+        source=source,
+        domain="Legal",
+        license="cc-by-sa-4.0",
+        added="2025-03-25",
+        created="2024-01-01, 2026-01-01",  # Scrape happened within these years - data likely written earlier
+        metadata={"source-pretty": "Cellar"},
+    )
+
+    return new_example
+
+
+def main():
+    data_path = Path("/work/dfm-data/pre-training/cellar/documents")
+    data_paths = [p.as_posix() for p in data_path.glob("DAN*.jsonl.gz")]
+    dfs = []
+    for i, path in enumerate(data_paths):
+        print(i, path.split("/")[-1])
+        try:
+            ds = load_dataset(
+                "json", data_files=path, split="train"
+            )  # a few datasets fail to load
+            dfs.append(ds)
+            print("\tSuccess")
+        except Exception:
+            print("\tFail")
+
+    ds = concatenate_datasets(dsets=dfs)
+
+    ds = cast(Dataset, ds)
+
+    ds = ds.map(convert_sample, remove_columns=ds.column_names)
+    ds = ds.rename_columns({"text_new": "text"})
+    ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))])  # type: ignore
+    ds = ds.select_columns(
+        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
+    )
+
+    save_path = Path(__file__).parent / f"{source}.parquet"
+    ds.to_parquet(save_path)
+
+
+if __name__ == "__main__":
+    main()
data/cellar/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
+{
+    "number_of_samples": 65736,
+    "average_document_length": 64221.30202628697,
+    "number_of_tokens": 1280909738,
+    "language": "dan, dansk, Danish",
+    "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
+}
data/cellar/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: d55c527928475485bf7f4b8b3050f5acd773e684546afd842c59f2f57a36c4d8
  • Pointer size: 131 Bytes
  • Size of remote file: 181 kB
data/danske-taler/create.py ADDED
@@ -0,0 +1,227 @@
+# /// script
+# requires-python = ">=3.12"
+# dependencies = [
+#     "beautifulsoup4==4.13.3",
+#     "datasets>=3.0.0",
+# ]
+# ///
+"""
+Danske Taler API Downloader
+This script downloads speeches/articles from the Danske Taler API: https://www.dansketaler.dk/api/v1
+
+It saves them in the following structure:
+
+```
+{
+    "text": "Den 1. august i år var der forløbet 25 år siden den sidste verdenskrigs udbrud. En måned senere - de[...]",
+    "source": "danske-taler",
+    "id": "danske-taler_278",
+    "added": "2025-03-28",
+    "created": "1939-09-20T00:00:00Z, 1939-09-20T23:59:59Z",
+    "license": "cc0",
+    "domain": "Spoken",
+    "metadata": {
+        "source-pretty": "Danske Taler"
+    }
+}
+```
+"""
+
+import logging
+import time
+from datetime import date
+from pathlib import Path
+from typing import Any
+
+import datasets
+import pandas as pd
+import requests
+from bs4 import BeautifulSoup
+from tqdm import tqdm
+
+logger = logging.getLogger(__name__)
+
+# Configuration
+API_BASE_URL = "https://www.dansketaler.dk/api/v1"
+
+
+def get_all_speeches() -> list[dict[str, Any]]:
+    # fetch the first page, notably to get the total number of pages
+    url = f"{API_BASE_URL}/speeches"
+    response = requests.get(url)
+    response.raise_for_status()
+    speeches = response.json()
+    meta = speeches["meta"]
+    total_pages = meta["total_pages"]
+
+    # fetch all pages
+    all_speeches = []
+    for page in range(1, total_pages + 1):
+        url = f"{API_BASE_URL}/speeches?page={page}"
+        response = requests.get(url)
+        response.raise_for_status()
+        speeches = response.json()
+        all_speeches.extend(speeches["speeches"])
+
+    return all_speeches
+
+
+def fetch_license_div(
+    url: str, max_retries: int = 3, backoff_factor: float = 0.5
+) -> str | None:
+    """
+    Fetches the license div from the page with retry logic.
+
+    Args:
+        url: The URL to fetch the license div from
+        max_retries: Maximum number of retry attempts
+        backoff_factor: Factor to determine exponential backoff time between retries
+
+    Returns:
+        The text content of the license div if found, None otherwise
+    """
+    retries = 0
+
+    while retries <= max_retries:
+        try:
+            response = requests.get(url, timeout=10)
+            response.raise_for_status()
+
+            soup = BeautifulSoup(response.text, "html.parser")
+            license_div = soup.find("div", class_="speech-copyright")
+
+            return license_div.text if license_div else None
+
+        except (requests.RequestException, AttributeError) as e:
+            retries += 1
+
+            if retries > max_retries:
+                logger.info(
+                    f"Failed to fetch license after {max_retries} attempts: {str(e)}"
+                )
+                return None
+
+            # Calculate backoff time using exponential backoff
+            wait_time = backoff_factor * (2 ** (retries - 1))
+            logger.info(
+                f"Attempt {retries} failed. Retrying in {wait_time:.2f} seconds..."
+            )
+            time.sleep(wait_time)
+
+    return None
+
+
+def convert_to_license(license_information: str | None) -> str | None:
+    """Checks if the page states that the material is free of copyright."""
+
+    if license_information and (
+        ("Materialet er fri af ophavsret" in license_information)
+        # the misspelled variant below appears verbatim on some pages
+        or ("Materialet er fri af ophvasret" in license_information)
+        or ("Ophavsretten er bortfaldet" in license_information)
+        or ("Manuskriptet er fri af ophavsret" in license_information)
+        or ("Offentlig " == license_information)
+    ):
+        return "cc0"
+
+    return license_information
+
+
+def convert_to_row(speech_meta: dict[str, Any]) -> dict[str, Any]:
+    speech_id = speech_meta["id"]
+
+    date_of_speech = speech_meta["date"]["iso_date"]
+    date_of_speech_start = f"{date_of_speech}"
+    date_of_speech_end = f"{date_of_speech}"
+
+    license_information = fetch_license_div(speech_meta["url"])
+
+    row = {
+        "text": speech_meta["transcription"],
+        "source": "danske-taler",
+        "id": f"danske-taler_{speech_id}",
+        # current date
+        "added": date.today().isoformat(),
+        "created": f"{date_of_speech_start}, {date_of_speech_end}",
+        "license_information": license_information,
+        "domain": "Spoken",
+        "metadata": {"source-pretty": "Danske Taler"},
+    }
+
+    return row
+
+
+def download_speeches() -> pd.DataFrame:
+    logger.info("Fetching all speeches from Danske Taler API")
+    speeches = get_all_speeches()
+    logger.info(f"Found {len(speeches)} speeches")
+
+    rows = []
+    for speech in tqdm(speeches):
+        row = convert_to_row(speech)
+        rows.append(row)
+
+    logger.info(f"Saving {len(rows)} speeches to dataset")
+    df = pd.DataFrame(rows)
+    return df
+
+
+def main():
+    save_path = Path(__file__).parent / "danske-taler.parquet"
+    save_path_all = Path(__file__).parent / "tmp" / "danske-taler-all.parquet"
+    save_path_all.parent.mkdir(parents=False, exist_ok=True)
+
+    if save_path_all.exists():
+        logger.info(f"Loading dataset from {save_path_all}")
+        df = pd.read_parquet(save_path_all)
+    else:
+        logger.info(f"Downloading speeches and saving to {save_path_all}")
+        df = download_speeches()
+        df.to_parquet(save_path_all)
+
+    licenses = [convert_to_license(license) for license in df["license_information"]]
+    df["license"] = licenses
+
+    uniques_licenses = set(df["license"].tolist())
+    logger.info("Unique licenses:")
+    for license in uniques_licenses:
+        logger.info(f"\t{license}")
+
+    # remove documents without a cc0 license
+    len_df = len(df)
+    df = df[df["license"] == "cc0"]
+    logger.info(f"Removed {len_df - len(df)} documents without a cc0 license")
+
+    # remove duplicate ids
+    len_df = len(df)
+    df = df.drop_duplicates(subset=["id"])
+    logger.info(f"Removed {len_df - len(df)} duplicate ids")
+
+    # remove rows with empty text
+    len_df = len(df)
+    df = df[df["text"].str.strip() != ""]
+    logger.info(f"Removed {len_df - len(df)} rows with empty text")
+
+    # remove rows with duplicate text
+    len_df = len(df)
+    df = df.drop_duplicates(subset=["text"])
+    logger.info(f"Removed {len_df - len(df)} rows with duplicate text")
+
+    dataset = datasets.Dataset.from_pandas(df)
+    assert len(set(dataset["id"])) == len(dataset), "IDs are not unique"
+    assert len(set(dataset["text"])) == len(dataset), "Texts are not unique"
+    assert len(set(dataset["license"])) == 1, "Multiple licenses found"
+
+    # check for html tags in text
+    assert not df["text"].str.contains("<[^>]*>").any(), "HTML tags found in text"
+
+    dataset.to_parquet(save_path)
+
+
+if __name__ == "__main__":
+    log_path = Path(__file__).parent / "danske-taler.log"
+    logging.basicConfig(
+        level=logging.INFO,
+        format="%(asctime)s - %(levelname)s - %(message)s",
+        handlers=[
+            logging.StreamHandler(),
+            logging.FileHandler(log_path),
+        ],
+    )
+    main()
data/danske-taler/danske-taler.log ADDED
@@ -0,0 +1,57 @@
+2025-03-29 14:14:08,846 - INFO - Downloading speeches and saving to /work/githubs/tmp/danish-dynaword/data/danske-taler/tmp/danske-taler-all.parquet
+2025-03-29 14:14:08,847 - INFO - Fetching all speeches from Danske Taler API
+2025-03-29 14:15:19,326 - INFO - Found 4725 speeches
+ 13%|██████████▏ | 597/4725 [01:22<11:15, 6.11it/s]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/niels-hoejlund-pedersens-translokationstale-2020
+ 17%|██████████████ | 818/4725 [01:57<09:00, 7.23it/s]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/katrine-lykke-pedersens-tale-til-unge-om-haab-i-en-coronatid
+ 17%|█████████████▋ | 820/4725 [02:01<1:05:16, 1.00s/it]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/anastacia-halkens-tale-til-unge-om-haab-i-en-coronatid
+ 18%|██████████████▏ | 828/4725 [02:07<17:53, 3.63it/s]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/thomas-vinterbergs-tale-ved-modtagelsen-af-oscar-prisen
+ 22%|█████████████████▋ | 1042/4725 [02:41<10:04, 6.09it/s]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/pernille-vermunds-tale-ved-folketingets-aabningsdebat-2021
+ 22%|█████████████████▉ | 1059/4725 [02:48<08:22, 7.30it/s]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/pernille-vermunds-tale-ved-nye-borgerliges-aarsmoede-2021
+ 22%|█████████████████▌ | 1061/4725 [02:52<1:01:08, 1.00s/it]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/mette-thiesens-tale-ved-nye-borgerliges-aarsmoede-2021
+ 22%|█████████████████▌ | 1062/4725 [02:57<2:00:22, 1.97s/it]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/peter-seier-christensens-tale-ved-nye-borgerliges-aarsmoede-2021
+ 34%|███████████████████████████▍ | 1617/4725 [04:25<07:09, 7.24it/s]Attempt 1 failed. Retrying in 0.50 seconds...
+Attempt 2 failed. Retrying in 1.00 seconds...
+Attempt 3 failed. Retrying in 2.00 seconds...
+Failed to fetch license after 3 attempts: 500 Server Error: Internal Server Error for url: https://www.dansketaler.dk/tale/silke-ena-svares-tale-ved-demonstrationen-for-born-og-unge
+100%|████████████████████████████████████████████████████████████████████████████████| 4725/4725 [12:43<00:00, 6.19it/s]
+2025-03-29 14:28:02,454 - INFO - Saving 4725 speeches to dataset
+2025-03-29 14:28:03,330 - INFO - Unique licenses:
+2025-03-29 14:28:03,331 - INFO - 	None
+2025-03-29 14:28:03,331 - INFO - 	Materialet er beskyttet af ophavsret
+2025-03-29 14:28:03,331 - INFO - 	cc0
+2025-03-29 14:28:03,331 - INFO - 	Materialet er beskyttet af ophavsret, da talen ikke er holdt i offentligheden.
+2025-03-29 14:28:03,331 - INFO - 	Materialet er omfattet af ophavsret
+2025-03-29 14:28:03,331 - INFO - 	Manuskript taget fra ft.dk. med tilladelse fra udgiver.
+2025-03-29 14:28:03,331 - INFO - 	Materialet et beskyttet af ophavsret
+2025-03-29 14:28:03,331 - INFO - 	Manuskript taget fra ft.dk med tilladelse fra udgiver.
+2025-03-29 14:28:03,331 - INFO - 	Materialet er beskyttet af ophavsret
+2025-03-29 14:28:03,331 - INFO - 	Materialet er beskyttet af ophavsret
+2025-03-29 14:28:03,461 - INFO - Removed 2063 documents without a cc0 license
+2025-03-29 14:28:03,541 - INFO - Removed 0 duplicate ids
+2025-03-29 14:28:03,549 - INFO - Removed 2 rows with empty text
+2025-03-29 14:28:03,631 - INFO - Removed 2 rows with duplicate text
+Creating parquet from Arrow format: 100%|██████████████████████████████████████████████████| 3/3 [00:00<00:00, 11.33ba/s]
data/danske-taler/danske-taler.md ADDED
@@ -0,0 +1,138 @@
+---
+pretty_name: Danske Taler
+language:
+- da
+license: cc0-1.0
+license_name: CC-0
+task_categories:
+- text-generation
+- fill-mask
+task_ids:
+- language-modeling
+---
+
+# Dataset Card for Danske Taler
+
+<!-- START-SHORT DESCRIPTION -->
+Danish Speeches from [dansketaler.dk](https://www.dansketaler.dk)
+<!-- END-SHORT DESCRIPTION -->
+
+The database dansketaler.dk is managed by Danske Taler, an independent institution that, in addition to managing the database, carries out cultural
+and democratic projects based on speeches.
+Danske Taler states that its goal is to preserve our cultural heritage and to promote active citizenship and democratic confidence through its work.
+Additionally, Danske Taler provides data to a number of online resources, including: lex.dk, sprogteknologi.dk, and ordnet.dk.
+
+The goal of the dataset is to collect historical and timely speeches and make them available for the public.
+
+Learn more about Danske Taler by reading their [about us](https://www.dansketaler.dk/om-os) page.
+
+## Dataset Description
+
+<!-- START-DESC-STATS -->
+- **Language**: dan, dansk, Danish
+- **Number of samples**: 2.66K
+- **Number of tokens (Llama 3)**: 8.23M
+- **Average document length (characters)**: 9446.88
+<!-- END-DESC-STATS -->
+
+## Dataset Structure
+An example from the dataset looks as follows.
+
+<!-- START-SAMPLE -->
+```py
+{
+    "text": "Den 1. august i år var der forløbet 25 år siden den sidste verdenskrigs udbrud. En måned senere - de[...]",
+    "source": "danske-taler",
+    "id": "danske-taler_278",
+    "added": "2025-03-28",
+    "created": "1939-09-20T00:00:00Z, 1939-09-20T23:59:59Z",
+    "license": "cc0",
+    "license_information": "Materialet er fri af ophavsret",
+    "domain": "Spoken",
+    "metadata": {
+        "source-pretty": "Danske Taler"
+    },
+    "__index_level_0__": 20
+}
+```
+
+### Data Fields
+
+An entry in the dataset consists of the following fields:
+
+- `text` (`str`): The content of the document.
+- `source` (`str`): The source of the document (see [Source Data](#source-data)).
+- `id` (`str`): A unique identifier for each document.
+- `added` (`str`): A date for when the document was added to this collection.
+- `created` (`str`): A date range for when the document was originally created.
+- `license` (`str`): The license of the document. The licenses vary according to the source.
+- `domain` (`str`): The domain of the source.
+- `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
+- `metadata/*`: Potentially additional metadata.
+<!-- END-SAMPLE -->
+
+### Dataset Statistics
+
+<!-- START-DATASET PLOTS -->
+<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
+<!-- END-DATASET PLOTS -->
+
+## Additional Information
+
+### Dataset Collection Process
+
+This dataset was collected using the publicly available [API](https://www.dansketaler.dk/api/v1).
+
+### Quality Assurance
+After the initial download we check for and remove exact duplicates, empty texts, and duplicate ids. We additionally check whether the articles contain any HTML.
+
+### Opportunities for Improvement
+
+This dataset can be updated to include the latest available speeches.
+
+We consider the quality of the current collection high, with a low chance of
+incorrect formatting,
+spelling errors,
+empty documents, or
+misformatted segments.
+This stems from the quality assurance process, the source of the documents, and subjective inspection.
+
+### License Information
+Since the license information isn't available through the API, we collect it directly from the webpage of each speech under the header
+"Ophavsret".
+
+For speeches where it is noted that *"Materialet er fri af ophavsret"* (The material is in the public domain) or similar, we assign a `cc0` license.
+
+Such an example can be seen here:
+
+> **Ophavsret**
+>
+> Materialet er fri af ophavsret. Taler, som er holdt i offentligheden, er ikke omfattet af ophavsret (Jf. ophavsretslovens § 26 og 32).
+> Det betyder, at når en tale er indgået i Danske Talers database, kan den bruges af tredjeparter, fx til undervisning eller forskning.
+>
+> *source: [Ursula von der Leyens tale om europæisk forsvar og sikkerhed på Hærens Officersskole](https://www.dansketaler.dk/tale/tale-om-europaeisk-forsvar-og-sikkerhed-pa-haerens-officersskole)*
+
+Speeches without this notice are removed. One such example:
+
+> **Ophavsret**
+>
+> Materialet er beskyttet af ophavsret
+>
+> *Source: [Christina Egelunds tale ved Aarhus Universitets årsfest](https://www.dansketaler.dk/tale/christina-egelunds-tale-ved-aarhus-universitets-arsfest)*
+
+We manually checked the unique set of license descriptions to see if any were open licenses that weren't covered by the current criteria.
+
+For the specific filtering criteria see the `create.py` script.
+
+### Citation Information
+
+No citation is applicable for this work. We recommend citing the huggingface repository.
data/danske-taler/danske-taler.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:541b5692b2f9cd0a35eb179f2b1ec6786f819ac9f16c9d3c67f5265fc458cbdc
+size 15090122
data/danske-taler/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
+{
+    "number_of_samples": 2657,
+    "average_document_length": 9446.875799774181,
+    "number_of_tokens": 8225350,
+    "language": "dan, dansk, Danish",
+    "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
+}
data/danske-taler/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: 8abb94950063fe4e069fc28e9e4e03d5db685166b9696ccf83247c855320a53e
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/eur-lex-sum-da/create.py ADDED
@@ -0,0 +1,50 @@
+# /// script
+# requires-python = ">=3.12"
+# dependencies = [
+#     "datasets>=3.2.0",
+# ]
+# ///
+
+from pathlib import Path
+from typing import cast
+
+from datasets import Dataset, load_dataset
+
+source = "eur-lex-sum-da"
+
+
+def convert_sample(example):
+    new_example = dict(
+        text_new=example["text"],
+        source=source,
+        domain="Legal",
+        license="cc-by-sa-4.0",
+        added="2025-03-24",
+        created="2024-01-01, 2025-01-01",  # Scrape happened within the year - data likely written earlier
+        metadata={"source-pretty": "Eur-lex-sum-da"},
+    )
+
+    return new_example
+
+
+def main():
+    data_path = Path(
+        "/work/dfm-data/pre-training/eur-lex-sum-da/documents/eur-lex-sum-da.jsonl.gz"
+    )
+    ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
+
+    ds = cast(Dataset, ds)
+
+    ds = ds.map(convert_sample, remove_columns=ds.column_names)
+    ds = ds.rename_columns({"text_new": "text"})
+    ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))])  # type: ignore
+    ds = ds.select_columns(
+        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
+    )
+
+    save_path = Path(__file__).parent / f"{source}.parquet"
+    ds.to_parquet(save_path)
+
+
+if __name__ == "__main__":
+    main()
data/eur-lex-sum-da/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
+{
+    "number_of_samples": 1002,
+    "average_document_length": 87627.37025948103,
+    "number_of_tokens": 31367665,
+    "language": "dan, dansk, Danish",
+    "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
+}
data/eur-lex-sum-da/eur-lex-sum-da.md ADDED
@@ -0,0 +1,86 @@
+---
+pretty_name: EUR-Lex SUM
+language:
+- da
+license: cc-by-sa-4.0
+license_name: CC-BY-SA 4.0
+task_categories:
+- text-generation
+- fill-mask
+task_ids:
+- language-modeling
+---
+
+# Dataset Card for EUR-Lex SUM
+
+<!-- START-SHORT DESCRIPTION -->
+The Danish subsection of EUR-lex SUM consisting of EU legislation paired with professionally written summaries.
+<!-- END-SHORT DESCRIPTION -->
+
+EUR-Lex SUM is a dataset containing summaries of EU legislation from the EUR-Lex database. It consists of pairs of full legal texts and their corresponding professionally written summaries, covering European Union legal documents.
+The dataset is designed for training and evaluating automatic text summarization systems, particularly for legal documents. It's valuable for natural language processing (NLP) research since it provides high-quality, human-written summaries of complex legal texts in a specialized domain.
+
+## Dataset Description
+
+<!-- START-DESC-STATS -->
+- **Language**: dan, dansk, Danish
+- **Number of samples**: 1.00K
+- **Number of tokens (Llama 3)**: 31.37M
+- **Average document length (characters)**: 87627.37
+<!-- END-DESC-STATS -->
+
+## Dataset Structure
+An example from the dataset looks as follows.
+
+<!-- START-SAMPLE -->
+```py
+{
+    "text": "21.6.2019\nDA\nDen Europæiske Unions Tidende\nL 166/26\nKOMMISSIONENS DELEGEREDE FORORDNING (EU) 2019/98[...]",
+    "source": "eur-lex-sum-da",
+    "id": "eur-lex-sum-da_0",
+    "added": "2025-03-24 00:00:00",
+    "created": "2024-01-01, 2025-01-01",
+    "license": "cc-by-sa-4.0",
+    "domain": "Legal",
+    "metadata": {
+        "source-pretty": "Eur-lex-sum-da"
+    }
+}
+```
+
+### Data Fields
+
+An entry in the dataset consists of the following fields:
+
+- `text` (`str`): The content of the document.
+- `source` (`str`): The source of the document (see [Source Data](#source-data)).
+- `id` (`str`): A unique identifier for each document.
+- `added` (`str`): A date for when the document was added to this collection.
+- `created` (`str`): A date range for when the document was originally created.
+- `license` (`str`): The license of the document. The licenses vary according to the source.
+- `domain` (`str`): The domain of the source.
+- `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
+- `metadata/*`: Potentially additional metadata.
+<!-- END-SAMPLE -->
+
+### Dataset Statistics
+
+<!-- START-DATASET PLOTS -->
+<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
+<!-- END-DATASET PLOTS -->
+
+## Additional Information
+
+### Citation Information
+
+No citation is applicable for this work.
data/eur-lex-sum-da/eur-lex-sum-da.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:960737a58b29e80b12f5150dacd7e0559c2ec7d3f2878a626e264b92595d9c02
+size 35849965
data/eur-lex-sum-da/images/dist_document_length.png ADDED

Git LFS Details

  • SHA256: a351dbe01d1d663d848ea5ceaee24903b8814cbd914818c3487a0542efb0f872
  • Pointer size: 131 Bytes
  • Size of remote file: 185 kB
data/fm-udgivelser/create.py ADDED
@@ -0,0 +1,50 @@
+# /// script
+# requires-python = ">=3.12"
+# dependencies = [
+#     "datasets>=3.2.0",
+# ]
+# ///
+
+from pathlib import Path
+from typing import cast
+
+from datasets import Dataset, load_dataset
+
+source = "fm-udgivelser"
+
+
+def convert_sample(example):
+    new_example = dict(
+        text_new=example["text"],
+        source=source,
+        domain="Legal",
+        license="cc-by-sa-4.0",
+        added="2025-03-24",
+        created="2024-01-01, 2026-01-01",  # Scrape happened within these years - data likely written earlier
+        metadata={"source-pretty": "Finansministeriets Udgivelser"},
+    )
+
+    return new_example
+
+
+def main():
+    data_path = Path(
+        "/work/dfm-data/pre-training/fm-udgivelser/documents/finans-ministeriet.jsonl.gz"
+    )
+    ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
+
+    ds = cast(Dataset, ds)
+
+    ds = ds.map(convert_sample, remove_columns=ds.column_names)
+    ds = ds.rename_columns({"text_new": "text"})
+    ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))])  # type: ignore
+    ds = ds.select_columns(
+        ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
+    )
+
+    save_path = Path(__file__).parent / f"{source}.parquet"
+    ds.to_parquet(save_path)
+
+
+if __name__ == "__main__":
+    main()
data/fm-udgivelser/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
+{
+    "number_of_samples": 443,
+    "average_document_length": 490101.9300225734,
+    "number_of_tokens": 50335291,
+    "language": "dan, dansk, Danish",
+    "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
+}
data/fm-udgivelser/fm-udgivelser.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ pretty_name: Finansministeriets Udgivelser
3
+ language:
4
+ - da
5
+ license: cc-by-sa-4.0
6
+ license_name: CC-BY-SA 4.0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Finansministeriets Udgivelser
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ The official publication series of the Danish Ministry of Finance, containing economic analyses, budget proposals, and fiscal policy documents.
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ Finansministeriets Udgivelser (translated as "Publications of the Ministry of Finance") is the publication series of the Danish Ministry of Finance. It includes official reports, economic analyses, budget proposals, fiscal policy documents, and various other publications related to Denmark's public finances, economic policy, and financial governance.
21
+
22
+ These publications typically provide insights into Denmark's economic outlook, public spending plans, tax policies, and financial reforms. They serve as important reference materials for economists, policymakers, researchers, and citizens interested in understanding Denmark's financial policies and economic direction.
23
+
24
+ The publications are authoritative sources of information on Danish fiscal policy and are often used by various stakeholders to track and analyze the country's economic performance and public finance management.
25
+
26
+
27
+ ## Dataset Description
28
+
29
+ <!-- START-DESC-STATS -->
30
+ - **Language**: dan, dansk, Danish
31
+ - **Number of samples**: 443
32
+ - **Number of tokens (Llama 3)**: 50.34M
33
+ - **Average document length (characters)**: 490101.93
34
+ <!-- END-DESC-STATS -->
35
+
36
+
37
+ ## Dataset Structure
38
+ An example from the dataset looks as follows.
39
+
40
+
41
+ <!-- START-SAMPLE -->
42
+ ```py
43
+ {
44
+ "text": "\n\nFinanslov for\n\nfinansåret 2023 Tekst og anmærkninger\n\n§ 1. Dronningen\n\n\n\n\n\n§ 1.\n\nDronningen\n\nTekst[...]",
45
+ "source": "fm-udgivelser",
46
+ "id": "fm-udgivelser_0",
47
+ "added": "2025-03-24",
48
+ "created": "2024-01-01, 2026-01-01",
49
+ "license": "cc-by-sa-4.0",
50
+ "domain": "Legal",
51
+ "metadata": {
52
+ "source-pretty": "Finansministeriets Udgivelser"
53
+ }
54
+ }
55
+ ```
56
+
57
+ ### Data Fields
58
+
59
+ An entry in the dataset consists of the following fields:
60
+
61
+ - `text` (`str`): The content of the document.
62
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
63
+ - `id` (`str`): A unique identifier for each document.
64
+ - `added` (`str`): The date when the document was added to this collection.
65
+ - `created` (`str`): The date range within which the document was originally created.
66
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
67
+ - `domain` (`str`): The domain of the source.
68
+ - `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
69
+ - `metadata/*`: Any additional metadata.
70
+ <!-- END-SAMPLE -->
71
+
72
+
73
+ ### Dataset Statistics
74
+
75
+ <!-- START-DATASET PLOTS -->
76
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
78
+ <!-- END-DATASET PLOTS -->
79
+
80
+
81
+
82
+ ## Additional Information
83
+
84
+ <!--
85
+ ### How was the data collected
86
+
87
+ TODO: KRISTIAN
88
+ -->
89
+
90
+ ### Citation Information
91
+
92
+ No citation is applicable for this work.
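Each dataset directory also acts as a named configuration of the combined dataset (this is how the tests load them); a hedged example of pulling just this subset, where the hub repo id is an assumption for illustration:

```py
from datasets import load_dataset

# Repo id assumed for illustration; adjust to the actual hub location.
ds = load_dataset(
    "danish-foundation-models/danish-dynaword", "fm-udgivelser", split="train"
)
print(len(ds))  # expected: 443, per descriptive_stats.json
```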
data/fm-udgivelser/fm-udgivelser.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:402b9281913aba87938e0600a994cb0387331efd7cf73829699b5103989d8747
3
+ size 59885539
data/fm-udgivelser/images/dist_document_length.png ADDED
Git LFS Details
  • SHA256: 1c15a9bbebc18ac4b07b46c8838414e9624c2b4cc5ffbfdcef4d1d97cad3162e
  • Pointer size: 131 Bytes
  • Size of remote file: 182 kB
data/miljoeportalen/create.py ADDED
@@ -0,0 +1,50 @@
1
+ # /// script
2
+ # requires-python = ">=3.12"
3
+ # dependencies = [
4
+ # "datasets>=3.2.0",
5
+ # ]
6
+ # ///
7
+
8
+ from pathlib import Path
9
+ from typing import cast
10
+
11
+ from datasets import Dataset, load_dataset
12
+
13
+ source = "miljoeportalen"
14
+
15
+
16
+ def convert_sample(example):
17
+ new_example = dict(
18
+ text_new=example["text"],
19
+ source=source,
20
+ domain="Web",
21
+ license="cc0",
22
+ added="2025-03-24",
23
+ created="2024-01-01, 2025-01-01", # Scrape happen within the year - data likely written earlier
24
+ metadata={"source-pretty": "Miljøportalen"},
25
+ )
26
+
27
+ return new_example
28
+
29
+
30
+ def main():
31
+ data_path = Path(
32
+ "/work/dfm-data/pre-training/miljoeportal/documents/miljoeportal.jsonl.gz"
33
+ )
34
+ ds = load_dataset("json", data_files=data_path.as_posix(), split="train")
35
+
36
+ ds = cast(Dataset, ds)
37
+
38
+ ds = ds.map(convert_sample, remove_columns=ds.column_names)
39
+ ds = ds.rename_columns({"text_new": "text"})
40
+ ds = ds.add_column("id", [f"{source}_{i}" for i in range(len(ds))]) # type: ignore
41
+ ds = ds.select_columns(
42
+ ["text", "source", "id", "added", "created", "license", "domain", "metadata"]
43
+ )
44
+
45
+ save_path = Path(__file__).parent / f"{source}.parquet"
46
+ ds.to_parquet(save_path)
47
+
48
+
49
+ if __name__ == "__main__":
50
+ main()
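Both `create.py` scripts encode `created` as a single `"start, end"` string rather than two columns. A small sketch, not repo code, of parsing that convention back into `date` objects:

```py
from datetime import date

def parse_created(created: str) -> tuple[date, date]:
    """Parse the 'YYYY-MM-DD, YYYY-MM-DD' range used in the created column."""
    start, end = (date.fromisoformat(part.strip()) for part in created.split(","))
    return start, end

print(parse_created("2024-01-01, 2025-01-01"))
# (datetime.date(2024, 1, 1), datetime.date(2025, 1, 1))
```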
data/miljoeportalen/descriptive_stats.json ADDED
@@ -0,0 +1,7 @@
1
+ {
2
+ "number_of_samples": 2169,
3
+ "average_document_length": 224704.25034578147,
4
+ "number_of_tokens": 128477101,
5
+ "language": "dan, dansk, Danish",
6
+ "revision": "62e1cea23e4c6154d47ca4d78d731478623a99b9"
7
+ }
data/miljoeportalen/images/dist_document_length.png ADDED
Git LFS Details
  • SHA256: c404cebb1c94e0ce0210074ba18c1b48356f69328990b8eac52673ad2ef47a14
  • Pointer size: 131 Bytes
  • Size of remote file: 184 kB
data/miljoeportalen/miljoeportalen.md ADDED
@@ -0,0 +1,103 @@
1
+ ---
2
+ pretty_name: Miljøportalen
3
+ language:
4
+ - da
5
+ license: cc0-1.0
6
+ license_name: CC-0
7
+ task_categories:
8
+ - text-generation
9
+ - fill-mask
10
+ task_ids:
11
+ - language-modeling
12
+ ---
13
+
14
+ # Dataset Card for Miljøportalen
15
+
16
+ <!-- START-SHORT DESCRIPTION -->
17
+ Data from [Danmarks Miljøportal](https://www.miljoeportal.dk/om-danmarks-miljoeportal/) (Denmark's Environment Portal).
18
+ <!-- END-SHORT DESCRIPTION -->
19
+
20
+ Denmark's Environment Portal (Danmarks Miljøportal) is a joint public partnership owned by the state, municipalities, and regions, which aims to support digital environmental management in Denmark.
21
+
22
+ Danmarks Miljøportal's goal is for environmental data to be included early in all decisions that have an environmental impact. They do this by creating easy and open access to environmental data, making it possible for authorities and businesses to integrate the environment into their decisions.
23
+
24
+ These can be decisions aimed specifically at the environment, such as water plans, the Green Tripartite Agreement, biodiversity, and nature restoration, but also decisions about, for example, renewable energy, climate adaptation, new roads, residential areas, and industrial enterprises, where environmental aspects need to be considered.
25
+
26
+
27
+ ## Dataset Description
28
+
29
+ <!-- START-DESC-STATS -->
30
+ - **Language**: dan, dansk, Danish
31
+ - **Number of samples**: 2.17K
32
+ - **Number of tokens (Llama 3)**: 128.48M
33
+ - **Average document length (characters)**: 224704.25
34
+ <!-- END-DESC-STATS -->
35
+
36
+
37
+ ## Dataset Structure
38
+ An example from the dataset looks as follows.
39
+
40
+
41
+ <!-- START-SAMPLE -->
42
+ ```py
43
+ {
44
+ "text": "Bila110 g 1 101 10 - miljTIL R lj TIL RTIL RøraÆTSHUSKO pp ÆTSHUS KOÆTSHUS Kort\n\nLOKALPLAN NR[...]",
45
+ "source": "miljoeportalen",
46
+ "id": "miljoeportalen_0",
47
+ "added": "2025-03-24",
48
+ "created": "2024-01-01, 2025-01-01",
49
+ "license": "cc0",
50
+ "domain": "Web",
51
+ "metadata": {
52
+ "source-pretty": "Miljøportalen"
53
+ }
54
+ }
55
+ ```
56
+
57
+ ### Data Fields
58
+
59
+ An entry in the dataset consists of the following fields:
60
+
61
+ - `text`(`str`): The content of the document.
62
+ - `source` (`str`): The source of the document (see [Source Data](#source-data)).
63
+ - `id` (`str`): An unique identifier for each document.
64
+ - `added` (`str`): An date for when the document was added to this collection.
65
+ - `created` (`str`): An date range for when the document was originally created.
66
+ - `license` (`str`): The license of the document. The licenses vary according to the source.
67
+ - `domain` (`str`): The domain of the source
68
+ - `metadata/source-pretty` (`str`): The long form version of the short-form source name
69
+ - `metadata/*`: Potentially additional metadata
70
+ <!-- END-SAMPLE -->
71
+
72
+
73
+ ### Dataset Statistics
74
+
75
+ <!-- START-DATASET PLOTS -->
76
+ <img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
78
+ <!-- END-DATASET PLOTS -->
79
+
80
+
81
+
82
+ ## Additional Information
83
+
84
+
85
+ <!-- ### Data includes
86
+
87
+ TODO: KRISTIAN: I assume this is just the website, or does it also include reports?
88
+ -->
89
+
90
+ ### License information
91
+ This dataset is licensed under CC0. The license was clarified by support@miljoeportal.dk (translated from Danish):
92
+
93
+ > Data er underlagt Creative Common CC0, se:
94
+ > https://creativecommons.org/publicdomain/zero/1.0/deed.da.
95
+ >
96
+ > Lad mig vide hvis du har yderligere spørgsmål.
97
+ > Har du spørgsmål til din sag eller yderligere kommentarer, bedes du besvare denne mail.
98
+
99
+
100
+
101
+ ### Citation Information
102
+
103
+ No citation is applicable for this work.
data/miljoeportalen/miljoeportalen.parquet ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5342a7dbeaf5fa93b48b78f16308867d07e0c0ec1053830f0df7acb8f774eacf
3
+ size 169639678
pyproject.toml CHANGED
@@ -1,6 +1,6 @@
1
  [project]
2
  name = "danish-dynaword"
3
- version = "1.0.10"
4
  description = "project code for the danish dynaword project"
5
  readme = "README.md"
6
  requires-python = ">=3.12,<3.13" # 3.13 has issues with spacy and pytorch
 
1
  [project]
2
  name = "danish-dynaword"
3
+ version = "1.0.11"
4
  description = "project code for the danish dynaword project"
5
  readme = "README.md"
6
  requires-python = ">=3.12,<3.13" # 3.13 has issues with spacy and pytorch
src/bump_version.py CHANGED
@@ -26,6 +26,7 @@ def update_readme(version: str, readme_path: Path) -> None:
26
  """Find version in README table and update it."""
27
  start = "<!-- START README TABLE -->"
28
  end = "<!-- END README TABLE -->"
 
29
 
30
  with readme_path.open("r") as f:
31
  lines = f.readlines()
@@ -36,7 +37,7 @@ def update_readme(version: str, readme_path: Path) -> None:
36
  in_table = True
37
  if in_table:
38
  if "**Version**" in line:
39
- lines[i] = f"| **Version** | {version} |\n"
40
  break
41
  if end in line:
42
  raise ValueError("**Version** not found in README table.")
 
26
  """Find version in README table and update it."""
27
  start = "<!-- START README TABLE -->"
28
  end = "<!-- END README TABLE -->"
29
+
30
 
31
  with readme_path.open("r") as f:
32
  lines = f.readlines()
 
37
  in_table = True
38
  if in_table:
39
  if "**Version**" in line:
40
+ lines[i] = f"| **Version** | {version} ([Changelog](/CHANGELOG.md)) |\n"
41
  break
42
  if end in line:
43
  raise ValueError("**Version** not found in README table.")
src/tests/test_dataset_schema.py CHANGED
@@ -48,7 +48,7 @@ def test_sample_schema(repo_path: Path, dataset_name: str):
48
  class FrontmatterSchema(BaseModel):
49
  pretty_name: str
50
  language: list[Literal["da"]]
51
- license: Literal["cc0-1.0", "other", "cc-by-sa-4.0"]
52
 
53
 
54
  @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
 
48
  class FrontmatterSchema(BaseModel):
49
  pretty_name: str
50
  language: list[Literal["da"]]
51
+ license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]
52
 
53
 
54
  @pytest.mark.parametrize("dataset_name", DATASET_NAMES)
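The widened `license` literal is what lets a card declare `apache-2.0` and still pass the schema test. A minimal sketch of the validation this performs (schema copied from the test; the frontmatter dict is hypothetical):

```py
from typing import Literal

from pydantic import BaseModel

class FrontmatterSchema(BaseModel):
    pretty_name: str
    language: list[Literal["da"]]
    license: Literal["cc0-1.0", "other", "cc-by-sa-4.0", "apache-2.0"]

# Hypothetical frontmatter, as parsed from a dataset card's YAML header:
frontmatter = {"pretty_name": "Example", "language": ["da"], "license": "apache-2.0"}
FrontmatterSchema(**frontmatter)  # raises a ValidationError for any unlisted license
```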
src/tests/test_load.py CHANGED
@@ -4,6 +4,7 @@ from datasets import load_dataset
4
 
5
  from tests.readme_parsing import read_frontmatter_and_body
6
 
 
7
 
8
  def test_dataset_loads(repo_path: Path):
9
  """Ensures that the dataset can load as intended"""
@@ -26,4 +27,5 @@ def test_all_datasets_in_yaml(repo_path: Path):
26
  datasets = data_folder.glob("*")
27
 
28
  for dataset in datasets:
29
- assert dataset.name in ds_names
 
 
4
 
5
  from tests.readme_parsing import read_frontmatter_and_body
6
 
7
+ REMOVED_DATA = ["lexdk"] # data removed due to legal disputes, questions about legality, or similar
8
 
9
  def test_dataset_loads(repo_path: Path):
10
  """Ensures that the dataset can load as intended"""
 
27
  datasets = data_folder.glob("*")
28
 
29
  for dataset in datasets:
30
+ if dataset.name not in REMOVED_DATA:
31
+ assert dataset.name in ds_names
src/update_descriptive_statistics.py CHANGED
@@ -14,6 +14,7 @@ import multiprocessing
14
  from dataclasses import dataclass
15
  from pathlib import Path
16
  from textwrap import dedent
 
17
  from typing import Self, cast
18
 
19
  import pandas as pd
@@ -64,12 +65,13 @@ def _count_tokens(batch):
64
  def calculate_number_of_tokens(
65
  dataset: Dataset,
66
  text_column: str = "text",
 
67
  ) -> int:
68
  token_counts = dataset.map(
69
  _count_tokens,
70
  batched=True,
71
  batch_size=1000,
72
- num_proc=multiprocessing.cpu_count(),
73
  )
74
  return sum(token_counts["token_count"])
75
 
@@ -82,11 +84,11 @@ class DescriptiveStatsOverview:
82
  language: str = "dan, dansk, Danish"
83
 
84
  @classmethod
85
- def from_dataset(cls, dataset: Dataset) -> Self:
86
  return cls(
87
  number_of_samples=len(dataset),
88
  average_document_length=calculate_average_document_length(dataset),
89
- number_of_tokens=calculate_number_of_tokens(dataset),
90
  )
91
 
92
  def to_markdown(self) -> str:
@@ -149,8 +151,10 @@ def add_sample(markdown_path: Path, dataset: Dataset, max_str_len: int = 100):
149
  logger.info("Adding dataset sample to readme")
150
  sample = dataset[0]
151
  for k in sample:
152
- if isinstance(k, str) and len(sample[k]) > max_str_len:
153
  sample[k] = sample[k][:max_str_len] + "[...]"
 
 
154
 
155
  json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
156
  sample_str = sample_template.format(sample=json_sample)
@@ -205,9 +209,10 @@ def add_desc_statitics(
205
  markdown_path: Path,
206
  dataset: Dataset,
207
  desc_stats_path: Path,
 
208
  ) -> None:
209
  logger.info("Adding descriptive statistics to readme.")
210
- desc_stats = DescriptiveStatsOverview.from_dataset(dataset)
211
  desc_stats.to_disk(desc_stats_path)
212
  desc_stats.add_to_markdown(markdown_path)
213
 
@@ -217,6 +222,7 @@ def update_dataset(
217
  name: str,
218
  readme_name: None | str = None,
219
  force: bool = False,
 
220
  ) -> None:
221
  rev = get_latest_revision(dataset_path)
222
  desc_stats_path = dataset_path / "descriptive_stats.json"
@@ -241,7 +247,7 @@ def update_dataset(
241
  ds = load_dataset(str(repo_path), name, split="train")
242
  ds = cast(Dataset, ds)
243
 
244
- add_desc_statitics(markdown_path, ds, desc_stats_path)
245
  add_sample(markdown_path, ds)
246
  add_descriptive_statistics_plots(markdown_path, ds)
247
 
@@ -275,6 +281,12 @@ def create_parser():
275
  type=str,
276
  help="The repository where to calculate the descriptive statistics from",
277
  )
 
 
 
 
 
 
278
  return parser
279
 
280
 
@@ -297,6 +309,7 @@ def create_main_table(repo_path: Path = repo_path) -> tuple[pd.DataFrame, str, s
297
  license_references = (
298
  "[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en\n"
299
  + "[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en\n"
 
300
  )
301
 
302
  for dataset in datasets:
@@ -331,7 +344,9 @@ def create_main_table(repo_path: Path = repo_path) -> tuple[pd.DataFrame, str, s
331
  table["N. Tokens"] += [sum(table["N. Tokens"])]
332
 
333
  df = pd.DataFrame.from_dict(table)
 
334
  df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)
 
335
  return df, readme_references, license_references
336
 
337
 
@@ -352,21 +367,28 @@ def main(
352
  logging_level: int = 20,
353
  force: bool = False,
354
  repo_path: Path = repo_path,
 
355
  ) -> None:
356
  logging.basicConfig(level=logging_level)
357
 
 
 
358
  if dataset and dataset != "default":
359
  dataset_path = repo_path / "data" / dataset
360
- update_dataset(dataset_path, dataset_path.name, force=force)
361
  return
362
 
363
  if dataset is None:
364
  datasets = (repo_path / "data").glob("*")
365
  for dataset_path in datasets:
366
- update_dataset(dataset_path, dataset_path.name, force=force)
 
 
367
 
368
  if dataset is None or dataset == "default":
369
- update_dataset(repo_path, "default", "README.md", force=force)
 
 
370
  update_main_table(repo_path)
371
 
372
 
@@ -379,4 +401,5 @@ if __name__ == "__main__":
379
  logging_level=args.logging_level,
380
  force=args.force,
381
  repo_path=Path(args.repo_path),
 
382
  )
 
14
  from dataclasses import dataclass
15
  from pathlib import Path
16
  from textwrap import dedent
17
+ from datetime import datetime
18
  from typing import Self, cast
19
 
20
  import pandas as pd
 
65
  def calculate_number_of_tokens(
66
  dataset: Dataset,
67
  text_column: str = "text",
68
+ num_proc: int = 1,
69
  ) -> int:
70
  token_counts = dataset.map(
71
  _count_tokens,
72
  batched=True,
73
  batch_size=1000,
74
+ num_proc=num_proc,
75
  )
76
  return sum(token_counts["token_count"])
77
 
 
84
  language: str = "dan, dansk, Danish"
85
 
86
  @classmethod
87
+ def from_dataset(cls, dataset: Dataset, num_proc: int = 1) -> Self:
88
  return cls(
89
  number_of_samples=len(dataset),
90
  average_document_length=calculate_average_document_length(dataset),
91
+ number_of_tokens=calculate_number_of_tokens(dataset, num_proc=num_proc),
92
  )
93
 
94
  def to_markdown(self) -> str:
 
151
  logger.info("Adding dataset sample to readme")
152
  sample = dataset[0]
153
  for k in sample:
154
+ if isinstance(sample[k], str) and len(sample[k]) > max_str_len:
155
  sample[k] = sample[k][:max_str_len] + "[...]"
156
+ if isinstance(sample[k], datetime):
157
+ sample[k] = str(sample[k])
158
 
159
  json_sample = json.dumps(sample, indent=2, ensure_ascii=False)
160
  sample_str = sample_template.format(sample=json_sample)
 
209
  markdown_path: Path,
210
  dataset: Dataset,
211
  desc_stats_path: Path,
212
+ num_proc: int = 1,
213
  ) -> None:
214
  logger.info("Adding descriptive statistics to readme.")
215
+ desc_stats = DescriptiveStatsOverview.from_dataset(dataset, num_proc=num_proc)
216
  desc_stats.to_disk(desc_stats_path)
217
  desc_stats.add_to_markdown(markdown_path)
218
 
 
222
  name: str,
223
  readme_name: None | str = None,
224
  force: bool = False,
225
+ num_proc: int = 1,
226
  ) -> None:
227
  rev = get_latest_revision(dataset_path)
228
  desc_stats_path = dataset_path / "descriptive_stats.json"
 
247
  ds = load_dataset(str(repo_path), name, split="train")
248
  ds = cast(Dataset, ds)
249
 
250
+ add_desc_statitics(markdown_path, ds, desc_stats_path, num_proc=num_proc)
251
  add_sample(markdown_path, ds)
252
  add_descriptive_statistics_plots(markdown_path, ds)
253
 
 
281
  type=str,
282
  help="The repository where to calculate the descriptive statistics from",
283
  )
284
+ parser.add_argument(
285
+ "--num_proc",
286
+ default=multiprocessing.cpu_count(),
287
+ type=int,
288
+ help="The number of processes to use.",
289
+ )
290
  return parser
291
 
292
 
 
309
  license_references = (
310
  "[CC-0]: https://creativecommons.org/publicdomain/zero/1.0/legalcode.en\n"
311
  + "[CC-BY-SA 4.0]: https://creativecommons.org/licenses/by-sa/4.0/deed.en\n"
312
+ + "[Apache 2.0]: https://www.apache.org/licenses/LICENSE-2.0\n"
313
  )
314
 
315
  for dataset in datasets:
 
344
  table["N. Tokens"] += [sum(table["N. Tokens"])]
345
 
346
  df = pd.DataFrame.from_dict(table)
347
+ df = df.sort_values("N. Tokens")
348
  df["N. Tokens"] = df["N. Tokens"].apply(human_readable_large_int)
349
+
350
  return df, readme_references, license_references
351
 
352
 
 
367
  logging_level: int = 20,
368
  force: bool = False,
369
  repo_path: Path = repo_path,
370
+ num_proc: int | None = None,
371
  ) -> None:
372
  logging.basicConfig(level=logging_level)
373
 
374
+ num_proc = multiprocessing.cpu_count() if num_proc is None else num_proc
375
+
376
  if dataset and dataset != "default":
377
  dataset_path = repo_path / "data" / dataset
378
+ update_dataset(dataset_path, dataset_path.name, force=force, num_proc=num_proc)
379
  return
380
 
381
  if dataset is None:
382
  datasets = (repo_path / "data").glob("*")
383
  for dataset_path in datasets:
384
+ update_dataset(
385
+ dataset_path, dataset_path.name, force=force, num_proc=num_proc
386
+ )
387
 
388
  if dataset is None or dataset == "default":
389
+ update_dataset(
390
+ repo_path, "default", "README.md", force=force, num_proc=num_proc
391
+ )
392
  update_main_table(repo_path)
393
 
394
 
 
401
  logging_level=args.logging_level,
402
  force=args.force,
403
  repo_path=Path(args.repo_path),
404
+ num_proc=args.num_proc,
405
  )
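The net effect of this refactor is that token counting no longer hard-codes `multiprocessing.cpu_count()`; the CPU count is only the CLI default, and callers can cap it. A small sketch of the resolution logic added in `main()`:

```py
# Mirrors the new behaviour: default to all cores, allow an explicit cap.
import multiprocessing

def resolve_num_proc(num_proc: int | None = None) -> int:
    return multiprocessing.cpu_count() if num_proc is None else num_proc

print(resolve_num_proc())   # all cores, the previous hard-coded behaviour
print(resolve_num_proc(4))  # capped, e.g. on a shared machine
```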
test_results.log CHANGED
@@ -0,0 +1,13 @@
1
+ ============================= test session starts ==============================
2
+ platform linux -- Python 3.12.3, pytest-8.3.4, pluggy-1.5.0
3
+ rootdir: /work/githubs/tmp/danish-dynaword
4
+ configfile: pyproject.toml
5
+ collected 124 items
6
+
7
+ src/tests/test_dataset_schema.py ....................................... [ 31%]
8
+ ................................................... [ 72%]
9
+ src/tests/test_duplicates.py sssssssssssssssssssssssssssssss [ 97%]
10
+ src/tests/test_load.py .. [ 99%]
11
+ src/tests/test_unique_ids.py . [100%]
12
+
13
+ ======================= 93 passed, 31 skipped in 20.62s ========================
uv.lock CHANGED
@@ -202,7 +202,7 @@ wheels = [
202
 
203
  [[package]]
204
  name = "danish-dynaword"
205
- version = "1.0.9"
206
  source = { virtual = "." }
207
  dependencies = [
208
  { name = "datasets" },
 
202
 
203
  [[package]]
204
  name = "danish-dynaword"
205
+ version = "1.0.11"
206
  source = { virtual = "." }
207
  dependencies = [
208
  { name = "datasets" },