ReySajju742 committed on
Commit
2b435da
·
verified ·
1 Parent(s): 3d47fd0

Upload 11 files

Browse files
Files changed (3) hide show
  1. README.md +1 -13
  2. dataset_infos.json +68 -0
  3. ud_urdu_udtb.py +188 -0
README.md CHANGED
@@ -1,15 +1,3 @@
1
- ---
2
- license: apache-2.0
3
- task_categories:
4
- - text-classification
5
- language:
6
- - ur
7
- tags:
8
- - art
9
- pretty_name: urdu-udt
10
- size_categories:
11
- - 10K<n<100K
12
- ---
13
  # Summary
14
 
15
  The Urdu Universal Dependency Treebank was automatically converted from Urdu Dependency Treebank (UDTB) which is part of an ongoing effort of creating multi-layered treebanks for Hindi and Urdu.
@@ -74,4 +62,4 @@ Relations: converted from manual
74
  Contributors: Bhat, Riyaz Ahmad; Zeman, Daniel
75
  Contributing: elsewhere
76
  Contact: zeman@ufal.mff.cuni.cz
77
- ===============================================================================
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  # Summary
2
 
3
  The Urdu Universal Dependency Treebank was automatically converted from Urdu Dependency Treebank (UDTB) which is part of an ongoing effort of creating multi-layered treebanks for Hindi and Urdu.
 
62
  Contributors: Bhat, Riyaz Ahmad; Zeman, Daniel
63
  Contributing: elsewhere
64
  Contact: zeman@ufal.mff.cuni.cz
65
+ ===============================================================================
dataset_infos.json ADDED
@@ -0,0 +1,68 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "default": {
3
+ "description": "The Urdu Universal Dependency Treebank was automatically converted from Urdu Dependency Treebank (UDTB), which is part of an ongoing effort of creating multi-layered treebanks for Hindi and Urdu.",
4
+ "citation": "@InCollection{bhathindi,\n Title = {The Hindi/Urdu Treebank Project},\n Author = {Bhat, Riyaz Ahmad and Bhatt, Rajesh and Farudi, Annahita and Klassen, Prescott and Narasimhan, Bhuvana and Palmer, Martha and Rambow, Owen and Sharma, Dipti Misra and Vaidya, Ashwini and Vishnu, Sri Ramagurumurthy and others},\n Booktitle = {Handbook of Linguistic Annotation},\n Publisher = {Springer Press}\n}\n\n@inproceedings{palmer2009hindi,\n title={Hindi syntax: Annotating dependency, lexical predicate-argument structure, and phrase structure},\n author={Palmer, Martha and Bhatt, Rajesh and Narasimhan, Bhuvana and Rambow, Owen and Sharma, Dipti Misra and Xia, Fei},\n booktitle={The 7th International Conference on Natural Language Processing},\n pages={14--17},\n year={2009}\n}",
5
+ "homepage": "https://universaldependencies.org/",
6
+ "license": "CC BY-NC-SA 4.0",
7
+ "features": {
8
+ "idx": {"dtype": "int32", "id": null, "_type": "Value"},
9
+ "form": {"dtype": "string", "id": null, "_type": "Value"},
10
+ "lemma": {"dtype": "string", "id": null, "_type": "Value"},
11
+ "upos": {"dtype": "string", "id": null, "_type": "Value"},
12
+ "xpos": {"dtype": "string", "id": null, "_type": "Value"},
13
+ "feats": {"dtype": "string", "id": null, "_type": "Value"},
14
+ "head": {"dtype": "int32", "id": null, "_type": "Value"},
15
+ "deprel": {"dtype": "string", "id": null, "_type": "Value"},
16
+ "deps": {"dtype": "string", "id": null, "_type": "Value"},
17
+ "misc": {"dtype": "string", "id": null, "_type": "Value"}
18
+ },
19
+ "post_processed": null,
20
+ "supervised_keys": null,
21
+ "task_templates": [
22
+ {
23
+ "task": "dependency-parsing",
24
+ "column_mapping": {
25
+ "form": "tokens",
26
+ "head": "head",
27
+ "deprel": "deprel"
28
+ }
29
+ },
30
+ {
31
+ "task": "part-of-speech-tagging",
32
+ "column_mapping": {
33
+ "form": "tokens",
34
+ "upos": "pos_tags"
35
+ }
36
+ }
37
+ ],
38
+ "builder_name": "ud_urdu_udtb",
39
+ "config_name": "default",
40
+ "version": {
41
+ "version_str": "2.14.0",
42
+ "description": null,
43
+ "major": 2,
44
+ "minor": 14,
45
+ "patch": 0
46
+ },
47
+ "splits": {
48
+ "train": {
49
+ "name": "train",
50
+ "num_bytes": 0,
51
+ "num_examples": 4043,
52
+ "dataset_name": "ud_urdu_udtb"
53
+ },
54
+ "validation": {
55
+ "name": "validation",
56
+ "num_bytes": 0,
57
+ "num_examples": 552,
58
+ "dataset_name": "ud_urdu_udtb"
59
+ },
60
+ "test": {
61
+ "name": "test",
62
+ "num_bytes": 0,
63
+ "num_examples": 535,
64
+ "dataset_name": "ud_urdu_udtb"
65
+ }
66
+ }
67
+ }
68
+ }
ud_urdu_udtb.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2024 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import os
17
+
18
+ import datasets
19
+
20
# BibTeX entries for the Hindi/Urdu Treebank Project papers; surfaced via
# DatasetInfo(citation=...) below.
_CITATION = """\
@InCollection{bhathindi,
Title = {The Hindi/Urdu Treebank Project},
Author = {Bhat, Riyaz Ahmad and Bhatt, Rajesh and Farudi, Annahita and Klassen, Prescott and Narasimhan, Bhuvana and Palmer, Martha and Rambow, Owen and Sharma, Dipti Misra and Vaidya, Ashwini and Vishnu, Sri Ramagurumurthy and others},
Booktitle = {Handbook of Linguistic Annotation},
Publisher = {Springer Press}
}

@inproceedings{palmer2009hindi,
title={Hindi syntax: Annotating dependency, lexical predicate-argument structure, and phrase structure},
author={Palmer, Martha and Bhatt, Rajesh and Narasimhan, Bhuvana and Rambow, Owen and Sharma, Dipti Misra and Xia, Fei},
booktitle={The 7th International Conference on Natural Language Processing},
pages={14--17},
year={2009}
}
"""

# Short human-readable summary shown on the dataset page.
_DESCRIPTION = """\
The Urdu Universal Dependency Treebank was automatically converted from Urdu Dependency Treebank (UDTB),
which is part of an ongoing effort of creating multi-layered treebanks for Hindi and Urdu.
This treebank provides syntactic annotations for Urdu text following the Universal Dependencies framework.
"""

_HOMEPAGE = "https://universaldependencies.org/"

# NOTE(review): the (removed) README YAML metadata declared apache-2.0 while
# this constant says CC BY-NC-SA 4.0 — confirm which license actually applies.
_LICENSE = "CC BY-NC-SA 4.0"

# URL template per config; "{split}" is substituted with a .conllu file name.
_URLs = {
    "default": "https://raw.githubusercontent.com/UniversalDependencies/UD_Urdu-UDTB/master/{split}",
}
51
class UDUrduUDTB(datasets.GeneratorBasedBuilder):
    """Urdu Universal Dependency Treebank (UD_Urdu-UDTB) dataset builder.

    Downloads the per-split ``.conllu`` files from the UD_Urdu-UDTB GitHub
    repository and yields one example per token, carrying the ten standard
    CoNLL-U columns as features.
    """

    VERSION = datasets.Version("2.14.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=VERSION,
            description="Default configuration for UD_Urdu-UDTB",
        ),
    ]

    # Upstream file name for each split, substituted into the _URLs template.
    _SPLIT_FILES = {
        datasets.Split.TRAIN: "ur_udtb-ud-train.conllu",
        datasets.Split.VALIDATION: "ur_udtb-ud-dev.conllu",
        datasets.Split.TEST: "ur_udtb-ud-test.conllu",
    }

    def _info(self):
        """Return dataset metadata: features, citation, homepage, license."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "idx": datasets.Value("int32"),
                    "form": datasets.Value("string"),
                    "lemma": datasets.Value("string"),
                    "upos": datasets.Value("string"),
                    "xpos": datasets.Value("string"),
                    "feats": datasets.Value("string"),
                    "head": datasets.Value("int32"),
                    "deprel": datasets.Value("string"),
                    "deps": datasets.Value("string"),
                    "misc": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            # NOTE(review): the original passed datasets.TaskTemplate(
            # task="dependency-parsing"/"part-of-speech-tagging",
            # column_mapping=...) instances here. `TaskTemplate` is an
            # abstract base in `datasets.tasks` with no such constructor
            # signature, so _info() raised at runtime; task templates are
            # also deprecated in recent `datasets` releases. Dropped.
        )

    def _split_generators(self, dl_manager):
        """Download one .conllu file per split and return SplitGenerators.

        The original implementation handed the *unformatted* URL template
        (still containing a literal "{split}") to download_and_extract and
        then joined file names onto the resulting path — the request could
        never succeed and the joins pointed at nonexistent files. Fixed by
        formatting a concrete URL per split and downloading them as a dict.
        """
        url_template = _URLs[self.config.name]
        data_files = dl_manager.download_and_extract(
            {
                split: url_template.format(split=filename)
                for split, filename in self._SPLIT_FILES.items()
            }
        )
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": data_files[split]},
            )
            for split in self._SPLIT_FILES
        ]

    @staticmethod
    def _parse_token(fields):
        """Turn one 10-column CoNLL-U word line into an example dict.

        Returns None for multiword-token ranges ("4-5") and empty nodes
        ("8.1") — both carry non-integer IDs and are not part of the basic
        dependency tree. (The original only skipped "-" ranges, so empty
        nodes crashed int(fields[0]) with ValueError.)
        """
        token_id = fields[0]
        if "-" in token_id or "." in token_id:
            return None
        return {
            "idx": int(token_id),
            "form": fields[1],
            "lemma": fields[2],
            "upos": fields[3],
            "xpos": fields[4],
            "feats": fields[5],
            # "_" means no head annotated; encode as -1 to fit int32.
            "head": int(fields[6]) if fields[6] != "_" else -1,
            "deprel": fields[7],
            "deps": fields[8],
            "misc": fields[9],
        }

    def _generate_examples(self, filepath):
        """Yield ("<sentence_idx>-<token_pos>", example) pairs.

        Comment lines ("#...") are skipped; a blank line terminates the
        current sentence and its buffered tokens are flushed.
        """
        sentence_id = 0
        tokens = []
        with open(filepath, "r", encoding="utf-8") as f:
            for line in f:
                line = line.strip()
                if line.startswith("#"):
                    continue
                if not line:
                    # End of sentence: emit buffered tokens.
                    if tokens:
                        for i, example in enumerate(tokens):
                            yield f"{sentence_id}-{i}", example
                        sentence_id += 1
                        tokens = []
                    continue
                fields = line.split("\t")
                if len(fields) == 10:
                    example = self._parse_token(fields)
                    if example is not None:
                        tokens.append(example)
        # Flush the last sentence if the file lacks a trailing blank line.
        for i, example in enumerate(tokens):
            yield f"{sentence_id}-{i}", example