Tasks: Text Generation
Formats: parquet
Sub-tasks: language-modeling
Languages: Danish
Size: 1M - 10M
License:

Rohambarack committed
Commit 17ce613 · Parent(s): 72ebab9

adding Danish subset of the NCC
Files changed:
- CHANGELOG.md +9 -0
- README.md +5 -1
- data/ncc/create.py +256 -0
- data/ncc/descriptive_stats.json +7 -0
- data/ncc/images/dist_document_length.png +3 -0
- data/ncc/ncc.md +132 -0
- data/ncc/ncc.parquet +3 -0
- pyproject.toml +1 -1
- uv.lock +1 -1
CHANGELOG.md CHANGED

@@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file.
 
 The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/).
 
+
+## [v1.0.12] - 2025-04-16
+
+### Added
+
+- Added new dataset (~1.61B tokens)
+  - Norwegian Colossal Corpus (ncc)
+
+
 ## [v1.0.11] - 2025-03-29
 
 ### Added
README.md CHANGED

@@ -125,6 +125,10 @@ configs:
   data_files:
   - split: train
     path: data/nota/*.parquet
+- config_name: ncc
+  data_files:
+  - split: train
+    path: data/ncc/*.parquet
 annotations_creators:
 - no-annotation
 language_creators:

@@ -158,7 +162,7 @@ https://github.com/huggingface/datasets/blob/main/templates/README_guide.md
 <!-- START README TABLE -->
 | | |
 | ------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| **Version** | 1.0.… |
+| **Version** | 1.0.13 ([Changelog](/CHANGELOG.md)) |
 | **Language** | dan, dansk, Danish |
 | **License** | Permissible, see the respective dataset |
 | **Models** | For models trained on this data, see [danish-foundation-models](https://huggingface.co/danish-foundation-models) |
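With the `ncc` config registered above, the new subset can be loaded on its own. A minimal sketch, assuming the Hub repo id `danish-foundation-models/danish-dynaword` (the repo id itself is not stated in this diff):

```py
# Load only the ncc config added above; streaming avoids downloading the
# full ~2.8 GB parquet file up front.
from datasets import load_dataset

ncc = load_dataset(
    "danish-foundation-models/danish-dynaword",  # assumed repo id
    "ncc",
    split="train",
    streaming=True,
)
print(next(iter(ncc))["source"])  # "ncc"
```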
data/ncc/create.py ADDED

@@ -0,0 +1,256 @@

# /// script
# requires-python = ">=3.12"
# dependencies = [
#     "datasets>=3.2.0",
# ]
# ///
# setup
from pathlib import Path
from datetime import datetime
from datasets import Dataset, load_dataset

source = "ncc"


# functions
def word_tokenize(text: str) -> list[str]:
    """
    Tokenizes a string into words, splitting on whitespace and punctuation.

    Example:
        >>> word_tokenize("Hello, world!")
        ['Hello', ',', 'world', '!']
        >>> word_tokenize("This is a test.")
        ['This', 'is', 'a', 'test', '.']
        >>> word_tokenize("Many    spaces between words.")
        ['Many', 'spaces', 'between', 'words', '.']
    """
    punkt = [",", ".", "!", "?", ":", ";", "(", ")", "[", "]", "{", "}", '"', "'"]
    for p in punkt:
        text = text.replace(p, f" {p} ")
    return text.split()


def count_min_target(given_list: list, target_list: list, min: int) -> bool:
    """
    Iterates through the given list until at least `min` items match
    any item from the target list.
    """
    c_item = 0
    given_list_iter = iter(given_list)
    while c_item < min:
        try:
            current_item = next(given_list_iter)
            if current_item in target_list:
                c_item += 1
        except StopIteration:
            break

    return c_item == min


def min_alpha_ratio(text: str | list[str], min: float = 0.7) -> bool:
    """
    If not already split into words, splits the text with word_tokenize().
    Calculates the ratio of words consisting only of alphabetical characters
    and compares it to `min`.
    """
    if type(text) is str:
        text = word_tokenize(text)

    alpha_ratio = 1 - sum(not word.isalpha() for word in text) / len(text)

    return alpha_ratio >= min


def lookup_ref_dict(ref_dictionary: dict[str, list[str]], string_item: str) -> str:
    """
    Takes a reference dictionary and an item, and outputs the key whose value
    list contains any element found in the item.
    e.g.:
        ref_dictionary = {"ab": ["a", "b"],
                          "cd": ["c", "d"]
                          }
        string_item = "*a*" | "*b*"
        output = "ab"

    !!! WARNING: will return the last match !!!
        string_item = "*a*d*"
        output = "cd"
    """
    for key, values in ref_dictionary.items():
        for each_value in values:
            if each_value in string_item:
                output = key

    try:
        return output
    except UnboundLocalError:
        print(f"WARNING: ref_lookup_dict() unknown value in data --> {string_item}")


class document_filter:
    """
    Document filtering from a dictionary.
    Made for https://huggingface.co/datasets/NbAiLab/NCC

    confidence in language > 0.5  -> below, mostly noise
    check language == da          -> unwanted data if not
    text length > 10 words        -> short text, likely noise
    check alpha > 0.7             -> too many words with numbers in them, likely noise
    stopwords > 2                 -> no stopwords, likely not coherent text, likely noise
    """

    def __init__(
        self,
        req_language: str = "da",
        min_conf: float = 0.5,
        min_w_l: int = 10,
        min_alpha: float = 0.7,
        min_s_w_l: int = 2,
    ):
        self.req_language = req_language
        self.min_conf = min_conf
        self.min_w_l = min_w_l
        self.min_alpha = min_alpha
        self.min_s_w_l = min_s_w_l
        self.date_today = datetime.now().strftime("%Y-%m-%d")

    def first_layer_filter(self, meta_document: dict[str, str | int]) -> bool:
        """
        Filtering based on data already available in the dictionary:
            language
            confidence in the language classification
        """
        language = meta_document.get("lang_fasttext")
        confidence = float(meta_document.get("lang_fasttext_conf"))

        return (confidence >= self.min_conf) and (language == self.req_language)

    def second_layer_filter(self, text: str, stop_words: list[str]) -> bool:
        """
        Filtering based on data derived from the document text:
            text length:
                text is segmented into words by word_tokenize(),
                measured by len()
            alpha ratio:
                by min_alpha_ratio()
            minimum stop words present:
                by count_min_target()
        """
        word_list = word_tokenize(text)

        text_length_pass = len(word_list) >= self.min_w_l
        alpha_pass = min_alpha_ratio(word_list, self.min_alpha)
        s_w_pass = count_min_target(word_list, stop_words, self.min_s_w_l)

        return text_length_pass and alpha_pass and s_w_pass

    def dynaword_format(
        self,
        meta_document: dict[str, str | int],
        ref_dictionary_license: dict[str, list[str]],
        ref_dictionary_domain: dict[str, list[str]],
    ) -> dict[str, str | dict[str, str]]:
        """Reformats data to fit dynaword standards"""

        text = meta_document.get("text")
        id = meta_document.get("id")
        date = meta_document.get("publish_year")
        doc_type = meta_document.get("doc_type")

        newdata = {
            "text": text,
            "source": "ncc",
            "id": id,
            "added": self.date_today,
            "created": f"{date}-01-01, {date}-12-31",
            "license": lookup_ref_dict(ref_dictionary_license, doc_type),
            "domain": lookup_ref_dict(ref_dictionary_domain, doc_type),
            "metadata": {
                "source-pretty": "Norwegian Colossal Corpus",
                "source-type": doc_type,
            },
        }

        return newdata


# main
def main():
    # filtering setup
    stop_words = [
        "ad", "af", "alle", "alt", "anden", "at", "blev", "blive", "bliver",
        "da", "de", "dem", "den", "denne", "der", "deres", "det", "dette",
        "dig", "din", "disse", "dog", "du", "efter", "eller", "en", "end",
        "er", "et", "for", "fra", "ham", "han", "hans", "har", "havde",
        "have", "hende", "hendes", "her", "hos", "hun", "hvad", "hvis",
        "hvor", "i", "ikke", "ind", "jeg", "jer", "jo", "kunne", "man",
        "mange", "med", "meget", "men", "mig", "min", "mine", "mit", "mod",
        "ned", "noget", "nogle", "nu", "når", "og", "også", "om", "op",
        "os", "over", "på", "selv", "sig", "sin", "sine", "sit", "skal",
        "skulle", "som", "sådan", "thi", "til", "ud", "under", "var", "vi",
        "vil", "ville", "vor", "være", "været",
    ]
    doc_filter = document_filter()
    da_data = []

    # formatting setup
    ref_dictionary_license = {
        "other": ["government", "parliament", "publicreport", "lovdata", "maalfrid", "wikipedia"],
        "cc0-1.0": ["newspaper", "book"],
    }

    ref_dictionary_domain = {
        "Legal": ["government", "parliament", "publicreport", "lovdata", "maalfrid"],
        "News": ["newspaper"],
        "Wiki & Books": ["book", "wikipedia"],
    }

    ## load all data first to get splits, then load and filter by split
    data = load_dataset("NbAiLab/NCC", streaming=True)
    data_splits = list(reversed(data.keys()))

    for current_split in data_splits:
        data = load_dataset("NbAiLab/NCC", streaming=True, split=current_split)
        data_iter = iter(data)

        # filtering and formatting
        while True:
            try:
                current_text = next(data_iter)

                meta_data_filtering = doc_filter.first_layer_filter(current_text)

                if meta_data_filtering:
                    text_filtering = doc_filter.second_layer_filter(
                        current_text.get("text"), stop_words
                    )

                    if meta_data_filtering and text_filtering:
                        # formatting
                        dynaform = doc_filter.dynaword_format(
                            current_text, ref_dictionary_license, ref_dictionary_domain
                        )

                        da_data.append(dynaform)

            except StopIteration:
                break

    ### saving
    ds = Dataset.from_list(da_data)
    save_path = Path(__file__).parent / f"{source}.parquet"
    ds.to_parquet(save_path)


if __name__ == "__main__":
    main()
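Because of the inline script metadata (PEP 723) at the top of the file, the script should be runnable directly with `uv run data/ncc/create.py`, which resolves the `datasets` dependency on the fly. As a quick sanity check, a sketch of how the filtering helpers behave, assuming the file is importable as `create.py`:

```py
# Illustrative use of the helpers defined in create.py above.
from create import word_tokenize, min_alpha_ratio, count_min_target

words = word_tokenize("Dette er en kort dansk sætning.")
print(words)  # ['Dette', 'er', 'en', 'kort', 'dansk', 'sætning', '.']

# 6 of 7 tokens are purely alphabetical -> ratio ~0.86, which passes 0.7
print(min_alpha_ratio(words, 0.7))  # True

# "er" and "en" appear in the token list -> at least 2 stop-word matches
print(count_min_target(words, ["er", "en", "og"], 2))  # True
```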
data/ncc/descriptive_stats.json ADDED

@@ -0,0 +1,7 @@

{
    "number_of_samples": 65301,
    "average_document_length": 70916.04932543146,
    "number_of_tokens": 1606197164,
    "language": "dan, dansk, Danish",
    "revision": "72ebab94b5331169630c823308470471687bb921"
}
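Two of these fields can be recomputed directly from the parquet file added in this commit; a minimal sketch (the Llama 3 token count and the plot are out of scope here):

```py
# Recompute the sample count and average document length (characters)
# reported in descriptive_stats.json.
import pandas as pd

df = pd.read_parquet("data/ncc/ncc.parquet", columns=["text"])
print("number_of_samples:", len(df))                            # expected: 65301
print("average_document_length:", df["text"].str.len().mean())  # expected: ~70916.05
```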
data/ncc/images/dist_document_length.png ADDED

(binary image, stored via Git LFS)
data/ncc/ncc.md ADDED

@@ -0,0 +1,132 @@

---
pretty_name: Norwegian Colossal Corpus
language:
- da
license: other
license_name: CC0 1.0, NLOD 2.0, CC BY-SA 3.0
task_categories:
- text-generation
- fill-mask
task_ids:
- language-modeling
---

# Dataset Card for Norwegian Colossal Corpus

<!-- START-SHORT DESCRIPTION -->
Danish language subset of [NCC](https://huggingface.co/datasets/NbAiLab/NCC)
<!-- END-SHORT DESCRIPTION -->

The Norwegian Colossal Corpus is a collection of multiple smaller Norwegian corpuses suitable for training large language models. \
(desc. taken from [NCC](https://huggingface.co/datasets/NbAiLab/NCC))

This subset is the result of the following filtering of all available data splits:

- The document is marked as Danish
- The confidence of the language classification is at least 0.5
- The document has at least 10 words (whitespace-separated strings + punctuation)
- At least 70% of the words consist solely of alphabetical characters
- The document contains at least 2 Danish stop words

## Dataset Description

<!-- START-DESC-STATS -->
- **Language**: dan, dansk, Danish
- **Number of samples**: 65.30K
- **Number of tokens (Llama 3)**: 1.61B
- **Average document length (characters)**: 70916.05
<!-- END-DESC-STATS -->

## Dataset Structure

An example from the dataset looks as follows.

<!-- START-SAMPLE -->
```py
{
    "text": "h) ved beregningen omhandlet i litra f) kan pengemarkedsinstrumenter eller andele eller kapitalandel[...]",
    "source": "ncc",
    "id": "maalfrid_2ede28a2c9ba7b4c0162681385ab60f99e021bfa_25",
    "added": "2025-04-15",
    "created": "2021-01-01, 2021-12-31",
    "license": "other",
    "domain": "Legal",
    "metadata": {
        "source-pretty": "Norwegian Colossal Corpus",
        "source-type": "maalfrid_regjeringen"
    }
}
```

### Data Fields

An entry in the dataset consists of the following fields:

- `text` (`str`): The content of the document.
- `source` (`str`): The source of the document (see [Source Data](#source-data)).
- `id` (`str`): A unique identifier for each document.
- `added` (`str`): The date the document was added to this collection.
- `created` (`str`): A date range for when the document was originally created.
- `license` (`str`): The license of the document. The licenses vary according to the source.
- `domain` (`str`): The domain of the source.
- `metadata/source-pretty` (`str`): The long-form version of the short-form source name.
- `metadata/*`: Potentially additional metadata
<!-- END-SAMPLE -->

### Dataset Statistics

<!-- START-DATASET PLOTS -->
<img src="./images/dist_document_length.png" width="600" style="margin-right: 10px;" />
<!-- END-DATASET PLOTS -->

## Additional Information

## License Information
The dataset consists of multiple types of documents, with various licenses:
- [NLOD 2.0](https://data.norge.no/nlod/en/2.0): Norwegian government, parliament, and legal documents (domain == "Legal" and license == "other" in the dataset)
- [CC0 1.0](https://creativecommons.org/publicdomain/zero/1.0/): newspapers and books (license == "cc0-1.0" in the dataset)
- [CC BY-SA 3.0](https://creativecommons.org/licenses/by-sa/3.0/): Wikipedia articles (marked as domain == "Wiki & Books" and license == "other")

### Citation Information
```bibtex
@inproceedings{kummervold-etal-2022-norwegian-colossal,
    title = {The {N}orwegian colossal corpus: A text corpus for training large {N}orwegian language models},
    author = {Kummervold, Per E and
        Wetjen, Freddy and
        De la Rosa, Javier},
    booktitle = {Proceedings of the Thirteenth Language Resources and Evaluation Conference (LREC)},
    year = {2022},
    address = {Marseille, France},
    publisher = {European Language Resources Association},
    url = {https://aclanthology.org/2022.lrec-1.410},
    pages = {3852--3860},
    abstract = {Norwegian has been one of many languages lacking sufficient available text to train quality language models. In an attempt to bridge this gap, we introduce the Norwegian Colossal Corpus (NCC), which comprises 49GB of clean Norwegian textual data containing over 7B words. The NCC is composed of different and varied sources, ranging from books and newspapers to government documents and public reports, showcasing the various uses of the Norwegian language in society. The corpus contains mainly Norwegian Bokmål and Norwegian Nynorsk. Each document in the corpus is tagged with metadata that enables the creation of sub-corpora for specific needs. Its structure makes it easy to combine with large web archives that for licensing reasons could not be distributed together with the NCC. By releasing this corpus openly to the public, we hope to foster the creation of both better Norwegian language models and multilingual language models with support for Norwegian.},
}

@inproceedings{kummervold-etal-2021-operationalizing,
    title = {Operationalizing a National Digital Library: The Case for a {N}orwegian Transformer Model},
    author = {Kummervold, Per E and
        De la Rosa, Javier and
        Wetjen, Freddy and
        Brygfjeld, Svein Arne},
    booktitle = {Proceedings of the 23rd Nordic Conference on Computational Linguistics (NoDaLiDa)},
    year = {2021},
    address = {Reykjavik, Iceland (Online)},
    publisher = {Linköping University Electronic Press, Sweden},
    url = {https://aclanthology.org/2021.nodalida-main.3},
    pages = {20--29},
    abstract = {In this work, we show the process of building a large-scale training set from digital and digitized collections at a national library. The resulting Bidirectional Encoder Representations from Transformers (BERT)-based language model for Norwegian outperforms multilingual BERT (mBERT) models in several token and sequence classification tasks for both Norwegian Bokmål and Norwegian Nynorsk. Our model also improves the mBERT performance for other languages present in the corpus such as English, Swedish, and Danish. For languages not included in the corpus, the weights degrade moderately while keeping strong multilingual properties. Therefore, we show that building high-quality models within a memory institution using somewhat noisy optical character recognition (OCR) content is feasible, and we hope to pave the way for other memory institutions to follow.},
}
```
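Because every record carries the `domain` and `license` fields described above, sub-slices of the subset can be selected directly; a sketch, again assuming the `danish-foundation-models/danish-dynaword` repo id:

```py
# Keep only the CC0-licensed (newspaper/book) portion of the ncc subset.
from datasets import load_dataset

ncc = load_dataset("danish-foundation-models/danish-dynaword", "ncc", split="train")
cc0 = ncc.filter(lambda row: row["license"] == "cc0-1.0")
print(len(cc0), "CC0 documents")
```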
data/ncc/ncc.parquet ADDED

@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:f0366a6adccb2a85dded98063eb9dada0e6ef1125e0d819fe9982fa41edea529
size 2812773765
pyproject.toml CHANGED

@@ -1,6 +1,6 @@
 [project]
 name = "danish-dynaword"
-version = "1.0.11"
+version = "1.0.12"
 description = "project code for the danish dynaword project"
 readme = "README.md"
 requires-python = ">=3.12,<3.13" # 3.13 has issues with spacy and pytorch
uv.lock CHANGED

@@ -202,7 +202,7 @@ wheels = [
 
 [[package]]
 name = "danish-dynaword"
-version = "1.0.11"
+version = "1.0.12"
 source = { virtual = "." }
 dependencies = [
     { name = "datasets" },