# coding=utf-8 # Copyright 2024 The HuggingFace Datasets Authors and the current dataset script contributor. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import datasets _CITATION = """\ @InCollection{bhathindi, Title = {The Hindi/Urdu Treebank Project}, Author = {Bhat, Riyaz Ahmad and Bhatt, Rajesh and Farudi, Annahita and Klassen, Prescott and Narasimhan, Bhuvana and Palmer, Martha and Rambow, Owen and Sharma, Dipti Misra and Vaidya, Ashwini and Vishnu, Sri Ramagurumurthy and others}, Booktitle = {Handbook of Linguistic Annotation}, Publisher = {Springer Press} } @inproceedings{palmer2009hindi, title={Hindi syntax: Annotating dependency, lexical predicate-argument structure, and phrase structure}, author={Palmer, Martha and Bhatt, Rajesh and Narasimhan, Bhuvana and Rambow, Owen and Sharma, Dipti Misra and Xia, Fei}, booktitle={The 7th International Conference on Natural Language Processing}, pages={14--17}, year={2009} } """ _DESCRIPTION = """\ The Urdu Universal Dependency Treebank was automatically converted from Urdu Dependency Treebank (UDTB), which is part of an ongoing effort of creating multi-layered treebanks for Hindi and Urdu. This treebank provides syntactic annotations for Urdu text following the Universal Dependencies framework. """ _HOMEPAGE = "https://universaldependencies.org/" _LICENSE = "CC BY-NC-SA 4.0" _URLs = { "default": "https://raw.githubusercontent.com/UniversalDependencies/UD_Urdu-UDTB/master/{split}", } class UDUrduUDTB(datasets.GeneratorBasedBuilder): """Urdu Universal Dependency Treebank dataset.""" VERSION = datasets.Version("2.14.0") BUILDER_CONFIGS = [ datasets.BuilderConfig( name="default", version=VERSION, description="Default configuration for UD_Urdu-UDTB", ), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "idx": datasets.Value("int32"), "form": datasets.Value("string"), "lemma": datasets.Value("string"), "upos": datasets.Value("string"), "xpos": datasets.Value("string"), "feats": datasets.Value("string"), "head": datasets.Value("int32"), "deprel": datasets.Value("string"), "deps": datasets.Value("string"), "misc": datasets.Value("string"), } ), supervised_keys=None, homepage=_HOMEPAGE, license=_LICENSE, citation=_CITATION, task_templates=[ datasets.TaskTemplate( task="dependency-parsing", column_mapping={ "form": "tokens", "head": "head", "deprel": "deprel", }, ), datasets.TaskTemplate( task="part-of-speech-tagging", column_mapping={ "form": "tokens", "upos": "pos_tags", }, ), ], ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" urls = _URLs[self.config.name] data_dir = dl_manager.download_and_extract(urls) return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"filepath": os.path.join(data_dir, "ur_udtb-ud-train.conllu")}, ), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"filepath": os.path.join(data_dir, "ur_udtb-ud-dev.conllu")}, ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"filepath": os.path.join(data_dir, "ur_udtb-ud-test.conllu")}, ), ] def _generate_examples(self, filepath): """Yields examples as (key, example) tuples.""" with open(filepath, "r", encoding="utf-8") as f: current_sentences = [] sentence_id = 0 for line in f: line = line.strip() if line.startswith("#"): # Skip comments continue if not line: # Empty line marks end of sentence if current_sentences: for i, token_info in enumerate(current_sentences): yield f"{sentence_id}-{i}", { "idx": token_info[0], "form": token_info[1], "lemma": token_info[2], "upos": token_info[3], "xpos": token_info[4], "feats": token_info[5], "head": token_info[6], "deprel": token_info[7], "deps": token_info[8], "misc": token_info[9], } sentence_id += 1 current_sentences = [] continue # Parse ConLL-U format fields = line.split("\t") if len(fields) == 10 and not fields[0].startswith("#"): if "-" not in fields[0]: # Skip multi-tokens idx = int(fields[0]) form = fields[1] lemma = fields[2] upos = fields[3] xpos = fields[4] feats = fields[5] head = int(fields[6]) if fields[6] != "_" else -1 deprel = fields[7] deps = fields[8] misc = fields[9] current_sentences.append((idx, form, lemma, upos, xpos, feats, head, deprel, deps, misc)) # Don't forget the last sentence if current_sentences: for i, token_info in enumerate(current_sentences): yield f"{sentence_id}-{i}", { "idx": token_info[0], "form": token_info[1], "lemma": token_info[2], "upos": token_info[3], "xpos": token_info[4], "feats": token_info[5], "head": token_info[6], "deprel": token_info[7], "deps": token_info[8], "misc": token_info[9], }