holylovenia committed on
Commit 0e0c3a0
1 Parent(s): b8028e8

Upload ud.py with huggingface_hub

Files changed (1)
  1. ud.py +496 -0
ud.py ADDED
@@ -0,0 +1,496 @@
+ # coding=utf-8
+ # Copyright 2022 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ from pathlib import Path
+ from typing import Dict, List, Tuple, Iterable
+
+ import datasets
+ from copy import deepcopy
+ from conllu import TokenList
+
+ from seacrowd.utils import schemas
+ from seacrowd.utils.common_parser import load_ud_data, load_ud_data_as_seacrowd_kb
+ from seacrowd.utils.configs import SEACrowdConfig
+ from seacrowd.utils.constants import Tasks, Licenses
+
+ _CITATION = r"""
+ @misc{11234/1-5287,
+ title = {Universal Dependencies 2.13},
+ author = {Zeman, Daniel and Nivre, Joakim and Abrams, Mitchell and Ackermann, Elia and Aepli, No{\"e}mi and Aghaei, Hamid and Agi{\'c}, {\v Z}eljko and Ahmadi, Amir and Ahrenberg, Lars and Ajede, Chika Kennedy and Akkurt,
+ Salih Furkan and Aleksandravi{\v c}i{\=u}t{\.e}, Gabriel{\.e} and Alfina, Ika and Algom, Avner and Alnajjar, Khalid and Alzetta, Chiara and Andersen, Erik and Antonsen, Lene and Aoyama, Tatsuya and Aplonova, Katya and Aquino, Angelina and Aragon, Carolina and Aranes, Glyd and Aranzabe, Maria Jesus and Ar{\i}can, Bilge Nas and Arnard{\'o}ttir, {\t H}{\'o}runn and Arutie, Gashaw and Arwidarasti, Jessica Naraiswari and Asahara, Masayuki and {\'A}sgeirsd{\'o}ttir, Katla and Aslan, Deniz Baran and Asmazo{\u g}lu, Cengiz and Ateyah, Luma and Atmaca, Furkan and Attia, Mohammed and Atutxa, Aitziber and Augustinus, Liesbeth and Avel{\~a}s, Mariana and Badmaeva, Elena and Balasubramani, Keerthana and Ballesteros, Miguel and Banerjee, Esha and Bank, Sebastian and Barbu Mititelu, Verginica and Barkarson, Starkaður and Basile, Rodolfo and Basmov, Victoria and Batchelor, Colin and Bauer, John and Bedir, Seyyit Talha and Behzad, Shabnam and Belieni, Juan and Bengoetxea, Kepa and Benli, İbrahim and Ben Moshe, Yifat and Berk, G{\"o}zde and Bhat, Riyaz Ahmad and Biagetti, Erica and Bick, Eckhard and Bielinskien{\.e}, Agn{\.e} and Bjarnad{\'o}ttir, Krist{\'{\i}}n and Blokland, Rogier and Bobicev, Victoria and Boizou, Lo{\"{\i}}c and Borges V{\"o}lker, Emanuel and B{\"o}rstell, Carl and Bosco, Cristina and Bouma, Gosse and Bowman, Sam and Boyd, Adriane and Braggaar, Anouck and Branco, Ant{\'o}nio and Brokait{\.e}, Kristina and Burchardt, Aljoscha and Campos, Marisa and Candito, Marie and Caron, Bernard and Caron, Gauthier and Carvalheiro, Catarina and Carvalho, Rita and Cassidy, Lauren and Castro, Maria Clara and Castro, S{\'e}rgio and Cavalcanti, Tatiana and Cebiro{\u g}lu Eryi{\u g}it, G{\"u}l{\c s}en and Cecchini, Flavio Massimiliano and Celano, Giuseppe G. A. and {\v C}{\'e}pl{\"o}, Slavom{\'{\i}}r and Cesur, Neslihan and Cetin, Savas and {\c C}etino{\u g}lu, {\"O}zlem and Chalub, Fabricio and Chamila, Liyanage and Chauhan, Shweta and Chi, Ethan and Chika, Taishi and Cho, Yongseok and Choi, Jinho and Chun, Jayeol and Chung, Juyeon and Cignarella, Alessandra T. 
and Cinkov{\'a}, Silvie and Collomb, Aur{\'e}lie and {\c C}{\"o}ltekin, {\c C}a{\u g}r{\i} and Connor, Miriam and Corbetta, Claudia and Corbetta, Daniela and Costa, Francisco and Courtin, Marine and Crabb{\'e}, Beno{\^{\i}}t and Cristescu, Mihaela and Cvetkoski, Vladimir and Dale, Ingerid L{\o}yning and Daniel, Philemon and Davidson, Elizabeth and de Alencar, Leonel Figueiredo and Dehouck, Mathieu and de Laurentiis, Martina and de Marneffe, Marie-Catherine and de Paiva, Valeria and Derin, Mehmet Oguz and de Souza, Elvis and Diaz de Ilarraza, Arantza and Dickerson, Carly and Dinakaramani, Arawinda and Di Nuovo, Elisa and Dione, Bamba and Dirix, Peter and Dobrovoljc, Kaja and Doyle, Adrian and Dozat, Timothy and Droganova, Kira and Duran, Magali Sanches and Dwivedi, Puneet and Ebert, Christian and Eckhoff, Hanne and Eguchi, Masaki and Eiche, Sandra and Eli, Marhaba and Elkahky, Ali and Ephrem, Binyam and Erina, Olga and Erjavec, Toma{\v z} and Essaidi, Farah and Etienne, Aline and Evelyn, Wograine and Facundes, Sidney and Farkas, Rich{\'a}rd and Favero, Federica and Ferdaousi, Jannatul and Fernanda, Mar{\'{\i}}lia and Fernandez Alcalde, Hector and Fethi, Amal and Foster, Jennifer and Fransen, Theodorus and Freitas, Cl{\'a}udia and Fujita, Kazunori and Gajdo{\v s}ov{\'a}, Katar{\'{\i}}na and Galbraith, Daniel and Gamba, Federica and Garcia, Marcos and G{\"a}rdenfors, Moa and Gerardi, Fabr{\'{\i}}cio Ferraz and Gerdes, Kim and Gessler, Luke and Ginter, Filip and Godoy, Gustavo and Goenaga, Iakes and Gojenola, Koldo and G{\"o}k{\i}rmak, Memduh and Goldberg, Yoav and G{\'o}mez Guinovart, Xavier and Gonz{\'a}lez Saavedra,
+ Berta and Grici{\=u}t{\.e}, Bernadeta and Grioni, Matias and Grobol,
+ Lo{\"{\i}}c and Gr{\=u}z{\={\i}}tis, Normunds and Guillaume, Bruno and Guiller, Kirian and Guillot-Barbance, C{\'e}line and G{\"u}ng{\"o}r, Tunga and Habash, Nizar and Hafsteinsson, Hinrik and Haji{\v c}, Jan and Haji{\v c} jr., Jan and H{\"a}m{\"a}l{\"a}inen, Mika and H{\`a} M{\~y}, Linh and Han, Na-Rae and Hanifmuti, Muhammad Yudistira and Harada, Takahiro and Hardwick, Sam and Harris, Kim and Haug, Dag and Heinecke, Johannes and Hellwig, Oliver and Hennig, Felix and Hladk{\'a}, Barbora and Hlav{\'a}{\v c}ov{\'a}, Jaroslava and Hociung, Florinel and Hohle, Petter and Huang, Yidi and Huerta Mendez, Marivel and Hwang, Jena and Ikeda, Takumi and Ingason, Anton Karl and Ion, Radu and Irimia, Elena and Ishola, {\d O}l{\'a}j{\'{\i}}d{\'e} and Islamaj, Artan and Ito, Kaoru and Jagodzi{\'n}ska, Sandra and Jannat, Siratun and Jel{\'{\i}}nek, Tom{\'a}{\v s} and Jha, Apoorva and Jiang, Katharine and Johannsen, Anders and J{\'o}nsd{\'o}ttir, Hildur and J{\o}rgensen, Fredrik and Juutinen, Markus and Ka{\c s}{\i}kara, H{\"u}ner and Kabaeva, Nadezhda and Kahane, Sylvain and Kanayama, Hiroshi and Kanerva, Jenna and Kara, Neslihan and Karah{\'o}ǧa, Ritv{\'a}n and K{\aa}sen, Andre and Kayadelen, Tolga and Kengatharaiyer, Sarveswaran and Kettnerov{\'a}, V{\'a}clava and Kharatyan, Lilit and Kirchner, Jesse and Klementieva, Elena and Klyachko, Elena and Kocharov, Petr and K{\"o}hn, Arne and K{\"o}ksal, Abdullatif and Kopacewicz, Kamil and Korkiakangas, Timo and K{\"o}se, Mehmet and Koshevoy, Alexey and Kotsyba, Natalia and Kovalevskait{\.e}, Jolanta and Krek, Simon and Krishnamurthy, Parameswari and K{\"u}bler, Sandra and Kuqi, Adrian and Kuyruk{\c c}u, O{\u g}uzhan and Kuzgun, Asl{\i} and Kwak, Sookyoung and Kyle, Kris and Laan, K{\"a}bi and Laippala, Veronika and Lambertino, Lorenzo and Lando, Tatiana and Larasati, Septina Dian and Lavrentiev, Alexei and Lee, John and L{\^e} H{\`{\^o}}ng, Phương and Lenci, Alessandro and Lertpradit, Saran and Leung, Herman and Levina, Maria and Levine, Lauren and Li, Cheuk Ying and Li, Josie and Li, Keying and Li, Yixuan and Li, Yuan and Lim, {KyungTae} and Lima Padovani, Bruna and Lin, Yi-Ju Jessica and Lind{\'e}n, Krister and Liu, Yang Janet and Ljube{\v s}i{\'c}, Nikola and Lobzhanidze, Irina and Loginova, Olga and Lopes, Lucelene and Lusito, Stefano and Luthfi, Andry and Luukko, Mikko and Lyashevskaya, Olga and Lynn, Teresa and Macketanz, Vivien and Mahamdi, Menel and Maillard, Jean and Makarchuk, Ilya and Makazhanov, Aibek and Mandl, Michael and Manning, Christopher and Manurung, Ruli and Mar{\c s}an, B{\"u}{\c s}ra and M{\u a}r{\u a}nduc, C{\u a}t{\u a}lina and Mare{\v c}ek, David and Marheinecke, Katrin and Markantonatou, Stella and Mart{\'{\i}}nez Alonso, H{\'e}ctor and Mart{\'{\i}}n Rodr{\'{\i}}guez, Lorena and Martins, Andr{\'e} and Martins, Cl{\'a}udia and Ma{\v s}ek, Jan and Matsuda, Hiroshi and Matsumoto, Yuji and Mazzei, Alessandro and {McDonald}, Ryan and {McGuinness}, Sarah and Mendon{\c c}a, Gustavo and Merzhevich, Tatiana and Miekka, Niko and Miller, Aaron and Mischenkova, Karina and Missil{\"a}, Anna and Mititelu, C{\u a}t{\u a}lin and Mitrofan, Maria and Miyao, Yusuke and Mojiri Foroushani, {AmirHossein} and Moln{\'a}r, Judit and Moloodi, Amirsaeid and Montemagni, Simonetta and More, Amir and Moreno Romero, Laura and Moretti, Giovanni and Mori, Shinsuke and Morioka, Tomohiko and Moro, Shigeki and Mortensen, Bjartur and Moskalevskyi, Bohdan and Muischnek, Kadri and Munro, Robert and Murawaki, Yugo and M{\"u}{\"u}risep, Kaili and Nainwani, Pinkey and Nakhl{\'e}, Mariam and 
Navarro Hor{\~n}iacek, Juan Ignacio and Nedoluzhko,
+ Anna and Ne{\v s}pore-B{\=e}rzkalne, Gunta and Nevaci, Manuela and Nguy{\~{\^e}}n Th{\d i}, Lương and Nguy{\~{\^e}}n Th{\d i} Minh, Huy{\`{\^e}}n and Nikaido, Yoshihiro and Nikolaev, Vitaly and Nitisaroj, Rattima and Nourian, Alireza and Nunes, Maria das Gra{\c c}as Volpe and Nurmi, Hanna and Ojala, Stina and Ojha, Atul Kr. and {\'O}lad{\'o}ttir, Hulda and Ol{\'u}{\`o}kun, Ad{\'e}day{\d o}̀ and Omura, Mai and Onwuegbuzia, Emeka and Ordan, Noam and Osenova, Petya and {\"O}stling, Robert and {\O}vrelid, Lilja and {\"O}zate{\c s}, {\c S}aziye Bet{\"u}l and {\"O}z{\c c}elik, Merve and {\"O}zg{\"u}r, Arzucan and {\"O}zt{\"u}rk Ba{\c s}aran, Balk{\i}z and Paccosi, Teresa and Palmero Aprosio, Alessio and Panova, Anastasia and Pardo, Thiago Alexandre Salgueiro and Park, Hyunji Hayley and Partanen, Niko and Pascual, Elena and Passarotti, Marco and Patejuk, Agnieszka and Paulino-Passos, Guilherme and Pedonese, Giulia and Peljak-{\L}api{\'n}ska, Angelika and Peng, Siyao and Peng, Siyao Logan and Pereira, Rita and Pereira, S{\'{\i}}lvia and Perez, Cenel-Augusto and Perkova, Natalia and Perrier, Guy and Petrov, Slav and Petrova, Daria and Peverelli, Andrea and Phelan, Jason and Pierre-Louis, Claudel and Piitulainen, Jussi and Pinter, Yuval and Pinto, Clara and Pintucci, Rodrigo and Pirinen, Tommi A and Pitler, Emily and Plamada, Magdalena and Plank, Barbara and Poibeau, Thierry and Ponomareva, Larisa and Popel, Martin and Pretkalni{\c n}a, Lauma and Pr{\'e}vost, Sophie and Prokopidis, Prokopis and Przepi{\'o}rkowski, Adam and Pugh, Robert and Puolakainen, Tiina and Pyysalo, Sampo and Qi, Peng and Querido, Andreia and R{\"a}{\"a}bis, Andriela and Rademaker, Alexandre and Rahoman, Mizanur and Rama, Taraka and Ramasamy, Loganathan and Ramisch, Carlos and Ramos, Joana and Rashel, Fam and Rasooli, Mohammad Sadegh and Ravishankar, Vinit and Real, Livy and Rebeja, Petru and Reddy, Siva and Regnault, Mathilde and Rehm, Georg and Riabi, Arij and Riabov, Ivan and Rie{\ss}ler, Michael and Rimkut{\.e}, Erika and Rinaldi, Larissa and Rituma, Laura and Rizqiyah, Putri and Rocha, Luisa and R{\"o}gnvaldsson, Eir{\'{\i}}kur and Roksandic, Ivan and Romanenko, Mykhailo and Rosa, Rudolf and Roșca, Valentin and Rovati, Davide and Rozonoyer, Ben and Rudina, Olga and Rueter, Jack and R{\'u}narsson, Kristj{\'a}n and Sadde, Shoval and Safari, Pegah and Sahala, Aleksi and Saleh, Shadi and Salomoni, Alessio and Samard{\v z}i{\'c}, Tanja and Samson, Stephanie and Sanguinetti, Manuela and San{\i}yar, Ezgi and S{\"a}rg, Dage and Sartor, Marta and Sasaki,
+ Mitsuya and Saul{\={\i}}te, Baiba and Savary, Agata and Sawanakunanon, Yanin and Saxena, Shefali and Scannell, Kevin and Scarlata, Salvatore and Schang, Emmanuel and Schneider, Nathan and Schuster, Sebastian and Schwartz, Lane and Seddah, Djam{\'e} and Seeker, Wolfgang and Seraji, Mojgan and Shahzadi, Syeda and Shen, Mo and Shimada, Atsuko and Shirasu, Hiroyuki and Shishkina, Yana and Shohibussirri, Muh and Shvedova, Maria and Siewert, Janine and Sigurðsson, Einar Freyr and Silva, Jo{\~a}o and Silveira, Aline and Silveira, Natalia and Silveira, Sara and Simi, Maria and Simionescu, Radu and Simk{\'o}, Katalin and {\v S}imkov{\'a}, M{\'a}ria and S{\'{\i}}monarson, Haukur Barri and Simov, Kiril and Sitchinava, Dmitri and Sither, Ted and Skachedubova, Maria and Smith, Aaron and Soares-Bastos, Isabela and Solberg, Per Erik and Sonnenhauser, Barbara and Sourov, Shafi and Sprugnoli, Rachele and Stamou, Vivian and Steingr{\'{\i}}msson, Stein{\t h}{\'o}r and Stella, Antonio and Stephen, Abishek and Straka, Milan and Strickland, Emmett and Strnadov{\'a}, Jana and Suhr, Alane and Sulestio, Yogi Lesmana and Sulubacak, Umut and Suzuki, Shingo and Swanson, Daniel and Sz{\'a}nt{\'o}, Zsolt and Taguchi, Chihiro and Taji, Dima and Tamburini, Fabio and Tan, Mary Ann C. and Tanaka, Takaaki and Tanaya, Dipta and Tavoni, Mirko and Tella, Samson and Tellier, Isabelle and Testori, Marinella and Thomas, Guillaume and Tonelli, Sara and Torga, Liisi and Toska, Marsida and Trosterud, Trond and Trukhina, Anna and Tsarfaty, Reut and T{\"u}rk, Utku and Tyers, Francis and {\t H}{\'o}rðarson, Sveinbj{\"o}rn and {\t H}orsteinsson, Vilhj{\'a}lmur and Uematsu, Sumire and Untilov, Roman and Ure{\v s}ov{\'a}, Zde{\v n}ka and Uria, Larraitz and Uszkoreit, Hans and Utka, Andrius and Vagnoni, Elena and Vajjala, Sowmya and Vak, Socrates and van der Goot, Rob and Vanhove, Martine and van Niekerk, Daniel and van Noord, Gertjan and Varga, Viktor and Vedenina, Uliana and Venturi, Giulia and Villemonte de la Clergerie, Eric and Vincze, Veronika and Vlasova, Natalia and Wakasa, Aya and Wallenberg, Joel C. and Wallin, Lars and Walsh, Abigail and Washington, Jonathan North and Wendt, Maximilan and Widmer, Paul and Wigderson, Shira and Wijono, Sri Hartati and Wille, Vanessa Berwanger and Williams, Seyi and Wir{\'e}n, Mats and Wittern, Christian and Woldemariam, Tsegay and Wong, Tak-sum and Wr{\'o}blewska, Alina and Wu, Qishen and Yako, Mary and Yamashita, Kayo and Yamazaki, Naoki and Yan, Chunxiao and Yasuoka, Koichi and Yavrumyan, Marat M. and Yenice, Arife Bet{\"u}l and Y{\i}ld{\i}z, Olcay Taner and Yu, Zhuoran and Yuliawati, Arlisa and {\v Z}abokrtsk{\'y}, Zden{\v e}k and Zahra, Shorouq and Zeldes, Amir and Zhou, He and Zhu, Hanzhi and Zhu, Yilun and Zhuravleva, Anna and Ziane, Rayan},
+ url = {http://hdl.handle.net/11234/1-5287},
+ note = {{LINDAT}/{CLARIAH}-{CZ} digital library at the Institute of Formal and Applied Linguistics ({{\'U}FAL}), Faculty of Mathematics and Physics, Charles University},
+ copyright = {Licence Universal Dependencies v2.13},
+ year = {2023}
+ }
+ """
+
+ _LANGUAGES = ["ind", "vie", "tgl"]
+ _LOCAL = False
+
+ _DATASETNAME = "ud"
+
+ _SUPPORTED_TASKS = [Tasks.POS_TAGGING, Tasks.DEPENDENCY_PARSING, Tasks.MACHINE_TRANSLATION]
+
+ # map source subset names to indices in `_SUPPORTED_TASKS`
+ _SOURCE_SUBSETS_TO_TASKS_INDEX = {
+     "id_csui": [0, 1, 2],
+     "id_gsd": [0, 1],
+     "id_pud": [0, 2],
+     "vi_vtb": [0, 1],
+     "tl_trg": [0, 1, 2],
+     "tl_ugnayan": [0, 2],
+ }
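+ # e.g. "id_pud": [0, 2] means the PUD subset yields POS-tagging and
+ # machine-translation configs, but no dependency-parsing (kb) config.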
+
+ _DESCRIPTION = """\
+ Universal Dependencies (UD) is a project that is developing cross-linguistically consistent treebank annotation
+ for many languages, with the goal of facilitating multilingual parser development, cross-lingual learning, and
+ parsing research from a language typology perspective. The annotation scheme is based on an evolution of (universal)
+ Stanford dependencies (de Marneffe et al., 2006, 2008, 2014), Google universal part-of-speech tags
+ (Petrov et al., 2012), and the Interset interlingua for morphosyntactic tagsets (Zeman, 2008).
+ The general philosophy is to provide a universal inventory of categories and guidelines to facilitate consistent
+ annotation of similar constructions across languages, while allowing language-specific extensions when necessary.
+ """
+
+ _ISO_LANG_MAPPER_UD = {
+     "id": "ind",
+     "vi": "vie",
+     "tl": "tgl",
+ }
+
+ _HOMEPAGE = "https://lindat.mff.cuni.cz/repository/xmlui/handle/11234/1-5287"
+
+ _LICENSE = Licenses.APACHE_2_0.value
+
+ # "ud-v2.12": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-5150/ud-treebanks-v2.12.tgz?sequence=1&isAllowed=y"
+ # "ud-v2.13": "https://lindat.mff.cuni.cz/repository/xmlui/bitstream/handle/11234/1-5287/ud-treebanks-v2.13.tgz?sequence=1&isAllowed=y"
+
+ _URLS = {
+     "ud_id_csui": {
+         "train": "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-CSUI/master/id_csui-ud-train.conllu",
+         "test": "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-CSUI/master/id_csui-ud-test.conllu",
+     },
+     "ud_id_gsd": {
+         "train": "https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-train.conllu",
+         "test": "https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-test.conllu",
+         "dev": "https://raw.githubusercontent.com/indolem/indolem/main/dependency_parsing/UD_Indonesian_GSD/id_gsd-ud-dev.conllu",
+     },
+     "ud_id_pud": {
+         "test": "https://raw.githubusercontent.com/UniversalDependencies/UD_Indonesian-PUD/master/id_pud-ud-test.conllu",
+     },
+     "ud_vi_vtb": {
+         "train": "https://raw.githubusercontent.com/UniversalDependencies/UD_Vietnamese-VTB/master/vi_vtb-ud-train.conllu",
+         "test": "https://raw.githubusercontent.com/UniversalDependencies/UD_Vietnamese-VTB/master/vi_vtb-ud-test.conllu",
+         "dev": "https://raw.githubusercontent.com/UniversalDependencies/UD_Vietnamese-VTB/master/vi_vtb-ud-dev.conllu",
+     },
+     "ud_tl_trg": {
+         "test": "https://raw.githubusercontent.com/UniversalDependencies/UD_Tagalog-TRG/master/tl_trg-ud-test.conllu",
+     },
+     "ud_tl_ugnayan": {
+         "test": "https://raw.githubusercontent.com/UniversalDependencies/UD_Tagalog-Ugnayan/master/tl_ugnayan-ud-test.conllu",
+     },
+ }
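+
+ # Note: `dl_manager.download(_URLS[subset_name])` in `_ud_split_generator` below
+ # returns the same dict structure with each URL replaced by a local file path.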
+
+ _SOURCE_VERSION = "2.13.0"
+
+ _SEACROWD_VERSION = "2024.06.20"
+
+
+ class UDDataset(datasets.GeneratorBasedBuilder):
+
+     SOURCE_BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset_name}_source",
+             version=datasets.Version(_SOURCE_VERSION),
+             description=f"{_DATASETNAME} source schema",
+             schema="source",
+             subset_id=f"{_DATASETNAME}_{subset_name}",
+         )
+         for subset_name in _SOURCE_SUBSETS_TO_TASKS_INDEX.keys()
+     ]
+     SEQUENCE_BUILDER_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset_name}_seacrowd_seq_label",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} SEACrowd Seq Label schema",
+             schema="seacrowd_seq_label",
+             subset_id=f"{_DATASETNAME}_{subset_name}",
+         )
+         for subset_name, task_idx in _SOURCE_SUBSETS_TO_TASKS_INDEX.items()
+         if _SUPPORTED_TASKS.index(Tasks.POS_TAGGING) in task_idx
+     ]
+     KB_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset_name}_seacrowd_kb",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} SEACrowd Knowledge Base schema",
+             schema="seacrowd_kb",
+             subset_id=f"{_DATASETNAME}_{subset_name}",
+         )
+         for subset_name, task_idx in _SOURCE_SUBSETS_TO_TASKS_INDEX.items()
+         if _SUPPORTED_TASKS.index(Tasks.DEPENDENCY_PARSING) in task_idx
+     ]
+     T2T_CONFIGS = [
+         SEACrowdConfig(
+             name=f"{_DATASETNAME}_{subset_name}_seacrowd_t2t",
+             version=datasets.Version(_SEACROWD_VERSION),
+             description=f"{_DATASETNAME} SEACrowd Translation T2T schema EN-XX",
+             schema="seacrowd_t2t",
+             subset_id=f"{_DATASETNAME}_{subset_name}",
+         )
+         for subset_name, task_idx in _SOURCE_SUBSETS_TO_TASKS_INDEX.items()
+         if _SUPPORTED_TASKS.index(Tasks.MACHINE_TRANSLATION) in task_idx
+     ]
+
+     BUILDER_CONFIGS = SOURCE_BUILDER_CONFIGS + SEQUENCE_BUILDER_CONFIGS + KB_CONFIGS + T2T_CONFIGS
+
+     UPOS_TAGS = ["ADJ", "ADP", "ADV", "AUX", "CCONJ", "DET", "INTJ", "NOUN", "NUM", "PART", "PRON", "PROPN", "PUNCT", "SCONJ", "SYM", "VERB", "X"]
+
+     def _info(self) -> datasets.DatasetInfo:
+         if self.config.schema == "source":
+             schema_dict = {
+                 # metadata
+                 "sent_id": datasets.Value("string"),
+                 "text": datasets.Value("string"),
+                 # tokens
+                 "id": datasets.Sequence(datasets.Value("string")),
+                 "form": datasets.Sequence(datasets.Value("string")),
+                 "lemma": datasets.Sequence(datasets.Value("string")),
+                 "upos": datasets.Sequence(datasets.Value("string")),
+                 "xpos": datasets.Sequence(datasets.Value("string")),
+                 "feats": datasets.Sequence(datasets.Value("string")),
+                 "head": datasets.Sequence(datasets.Value("string")),
+                 "deprel": datasets.Sequence(datasets.Value("string")),
+                 "deps": datasets.Sequence(datasets.Value("string")),
+                 "misc": datasets.Sequence(datasets.Value("string")),
+             }
+
+             # add text_en for UD data that has English text (for T2T)
+             if _SUPPORTED_TASKS.index(Tasks.MACHINE_TRANSLATION) in _SOURCE_SUBSETS_TO_TASKS_INDEX["_".join(self.config.subset_id.split("_")[1:])]:
+                 schema_dict["text_en"] = datasets.Value("string")
+
+             # add "gloss" and "source" for the tl_trg subset
+             if self.config.subset_id == "ud_tl_trg":
+                 schema_dict["gloss"] = datasets.Value("string")
+                 schema_dict["source"] = datasets.Value("string")
+
+             features = datasets.Features(schema_dict)
+
+         elif self.config.schema == "seacrowd_seq_label":
+             features = schemas.seq_label_features(self.UPOS_TAGS)
+
+         elif self.config.schema == "seacrowd_kb":
+             features = schemas.kb_features
+
+         elif self.config.schema == "seacrowd_t2t":
+             features = schemas.text2text_features
+
+         else:
+             raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=self._generate_additional_citation(self.config.subset_id),
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
+         """Returns SplitGenerators."""
+
+         return self._ud_split_generator(dl_manager, self.config.subset_id)
+
+     def _generate_examples(self, filepath: Path) -> Tuple[int, Dict]:
+         """Yields examples as (key, example) tuples."""
+         dataset = self._ud_generate_examples(filepath, self.config.subset_id, self.info.features, self.config.schema == "source")
+
+         if self.config.schema == "source":
+             pass
+
+         elif self.config.schema == "seacrowd_seq_label":
+             # Some tokens carry the label "_", which marks a token with multiple labels
+             # (the split values appear in the preceding/subsequent rows); drop such tokens.
+             def remove_invalid_labels_from_seq(sent_id: str, tokens: Iterable, labels: Iterable, invalid_tokens: Iterable):
+                 _tokens, _labels = [], []
+                 for idx, val in enumerate(labels):
+                     if val not in invalid_tokens:
+                         _tokens.append(tokens[idx])
+                         _labels.append(labels[idx])
+
+                 return sent_id, _tokens, _labels
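+
+             # Illustrative example (CoNLL-U convention): a multiword-token row
+             # such as "3-4  kepadanya  _  _ ..." carries UPOS "_", while its
+             # parts (e.g. "kepada", "nya") follow as rows 3 and 4, so the
+             # ranged row is dropped from the tagging sequence.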
+
+             dataset = list(
+                 map(
+                     lambda d: dict(zip(
+                         ("id", "tokens", "labels"),
+                         remove_invalid_labels_from_seq(d["sent_id"], d["form"], d["upos"], invalid_tokens=("_",)),
+                     )),
+                     filter(lambda d: len(d["form"]) == len(d["upos"]), dataset),
+                 )
+             )
+
+         elif self.config.schema == "seacrowd_t2t":
+             dataset = list(
+                 map(
+                     lambda d: {
+                         "id": d["sent_id"],
+                         "text_1": d["text_en"],
+                         "text_2": d["text"],
+                         "text_1_name": "eng",
+                         "text_2_name": _ISO_LANG_MAPPER_UD[self.config.subset_id.split("_")[1]],
+                     },
+                     filter(lambda d: d.get("text_en"), dataset),
+                 )
+             )
+
259
+ elif self.config.schema == "seacrowd_kb":
260
+ morph_anomaly = self._get_morph_exceptions(self.config.subset_id)
261
+ dataset = load_ud_data_as_seacrowd_kb(
262
+ filepath,
263
+ dataset,
264
+ morph_exceptions=morph_anomaly
265
+ )
266
+
267
+ else:
268
+ raise NotImplementedError(f"Schema '{self.config.schema}' is not defined.")
269
+
270
+ for key, example in enumerate(dataset):
271
+ yield key, example
272
+
+     @staticmethod
+     def _set_load_ud_source_data_kwargs(subset_name: str):
+
+         def _assert_multispan_range_is_one(token_list: TokenList):
+             """
+             Assert that every multi-span token covers exactly 2 spans and that \
+             no field other than `form` carries extra information.
+             """
+             for token in token_list.filter(id=lambda i: not isinstance(i, int)):
+                 _id = token["id"]
+                 assert len(_id) == 3, f"Unexpected length of non-int CONLLU Token's id. Expected 3, found {len(_id)};"
+                 assert all(isinstance(a, b) for a, b in zip(_id, [int, str, int])), f"Non-int ID should be in format of '\\d+-\\d+'. Found {_id};"
+                 assert _id[2] - _id[0] == 1, f"Token has more than 2 spans. Found {_id[2] - _id[0] + 1} spans;"
+                 for key in ["lemma", "upos", "xpos", "feats", "head", "deprel", "deps"]:
+                     assert token[key] in {"_", None}, f"Field other than 'form' should not contain extra information. Found: '{key}' = '{token[key]}'"
+
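+         # Note: the `conllu` library parses a ranged id such as "3-4" into the
+         # tuple (3, "-", 4), which is the shape the assertions above check.
+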
+         kwargs_return = {}
+
+         if subset_name == "ud_id_csui":
+             kwargs_return = {
+                 "filter_kwargs": {"id": lambda i: isinstance(i, int)},
+                 "assert_fn": _assert_multispan_range_is_one,
+             }
+
+         if subset_name == "ud_jv_csui":
+             kwargs_return = {"filter_kwargs": {"id": lambda i: isinstance(i, int)}}
+
+         return kwargs_return
+
+     @staticmethod
+     def _generate_additional_citation(subset_name: str):
+         # Generate the subset-specific citation; the `_CITATION` value defined
+         # above is appended to the subset-based UD citation.
+
+         if subset_name == "ud_id_csui":
+             CITATION = r"""
+ @article{10.3844/jcssp.2020.1585.1597,
+     author = {Alfina, Ika and Budi, Indra and Suhartanto, Heru},
+     title = {Tree Rotations for Dependency Trees: Converting the Head-Directionality of Noun Phrases},
+     article_type = {journal},
+     volume = {16},
+     number = {11},
+     year = {2020},
+     month = {Nov},
+     pages = {1585-1597},
+     doi = {10.3844/jcssp.2020.1585.1597},
+     url = {https://thescipub.com/abstract/jcssp.2020.1585.1597},
+     journal = {Journal of Computer Science},
+     publisher = {Science Publications}
+ }
+ """ + _CITATION
+
+         elif subset_name == "ud_id_gsd":
+             CITATION = r"""
+ @inproceedings{mcdonald-etal-2013-universal,
+     title = "{U}niversal {D}ependency Annotation for Multilingual Parsing",
+     author = {McDonald, Ryan and Nivre, Joakim and Quirmbach-Brundage, Yvonne and Goldberg, Yoav and Das, Dipanjan and Ganchev, Kuzman and Hall, Keith and Petrov, Slav and Zhang, Hao and T{\"a}ckstr{\"o}m, Oscar and Bedini, Claudia and Bertomeu Castell{\'o}, N{\'u}ria and Lee, Jungmee},
+     booktitle = "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)",
+     month = aug,
+     year = "2013",
+     address = "Sofia, Bulgaria",
+     publisher = "Association for Computational Linguistics",
+     url = "https://aclanthology.org/P13-2017",
+     pages = "92--97",
+ }
+
+ @conference{2f8c7438a7f44f6b85b773586cff54e8,
+     title = "A gold standard dependency treebank for Indonesian",
+     author = "Ika Alfina and Arawinda Dinakaramani and Fanany, {Mohamad Ivan} and Heru Suhartanto",
+     note = "Publisher Copyright: {\textcopyright} 2019 Proceedings of the 33rd Pacific Asia Conference on Language, Information and Computation, PACLIC 2019. All rights reserved.; \
+             33rd Pacific Asia Conference on Language, Information and Computation, PACLIC 2019 ; Conference date: 13-09-2019 Through 15-09-2019",
+     year = "2019",
+     month = jan,
+     day = "1",
+     language = "English",
+     pages = "1--9",
+ }
+
+ @article{DBLP:journals/corr/abs-2011-00677,
+     author = {Fajri Koto and Afshin Rahimi and Jey Han Lau and Timothy Baldwin},
+     title = {IndoLEM and IndoBERT: {A} Benchmark Dataset and Pre-trained Language Model for Indonesian {NLP}},
+     journal = {CoRR},
+     volume = {abs/2011.00677},
+     year = {2020},
+     url = {https://arxiv.org/abs/2011.00677},
+     eprinttype = {arXiv},
+     eprint = {2011.00677},
+     timestamp = {Fri, 06 Nov 2020 15:32:47 +0100},
+     biburl = {https://dblp.org/rec/journals/corr/abs-2011-00677.bib},
+     bibsource = {dblp computer science bibliography, https://dblp.org}
+ }
+ """ + _CITATION
+
+         else:
+             CITATION = _CITATION
+
+         return CITATION
+
+     @staticmethod
+     def _get_morph_exceptions(subset_name: str):
+         morph_anomaly = []
+         # not implemented yet
+         # if subset_name == "ud_jv_csui":
+         #     morph_anomaly = [
+         #         # Exceptions due to inconsistencies in the raw data annotation
+         #         ("ne", "e"),
+         #         ("nipun", "ipun"),
+         #         ("me", "e"),  # occurrence word: "Esemme" = "Esem" + "e". original text has double 'm'.
+         #     ]
+
+         return morph_anomaly
+
+     @staticmethod
+     def _ud_split_generator(dl_manager, subset_name: str):
+
+         split_dset = []
+         urls = _URLS[subset_name]
+         data_path = dl_manager.download(urls)
+         if "train" in data_path:
+             split_dset.append(datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": data_path["train"],
+                 },
+             ))
+         if "test" in data_path:
+             split_dset.append(datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": data_path["test"],
+                 },
+             ))
+         if "dev" in data_path:
+             split_dset.append(datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": data_path["dev"],
+                 },
+             ))
+
+         return split_dset
+
+     @classmethod
+     def _ud_generate_examples(cls, filepath: str | list, subset_name: str, features: Iterable, is_source: bool):
+
+         # utility to fill data with a default value
+         def fill_data(data, col_name, fill_val):
+             _data = deepcopy(data)
+             _data[col_name] = _data.get(col_name, fill_val)
+             return _data
+
+         # utility to remove a column from the data
+         def pop_data(data, col_name):
+             _data = deepcopy(data)
+             _data.pop(col_name, None)
+             return _data
+
+         # allow a list of filepaths to be loaded
+         if isinstance(filepath, str):
+             filepath = [filepath]
+
+         dataset = []
+         for _filepath in filepath:
+             dataset.extend(list(
+                 load_ud_data(
+                     _filepath, **cls._set_load_ud_source_data_kwargs(subset_name)
+                 )
+             ))
+
+         # remove these fields from the source data since their occurrence is small (presumably malformed)
+         # and they are not listed in the misc features (https://tables.grew.fr/?data=ud_feats/MISC)
+         if subset_name == "ud_tl_ugnayan":
+             for key in ("newdoc_id", "text_id"):
+                 dataset = list(map(lambda x: pop_data(x, key), dataset))
+
+         if subset_name == "ud_tl_trg":
+             for key in ("AP", "BP", "OP", "DP", "PIV"):
+                 dataset = list(map(lambda x: pop_data(x, key), dataset))
+
+         # fill with defaults only for the source schema
+         if is_source:
+             for key, default_val in zip(("text_en", "gloss", "source"), ("", "", "")):
+                 if key in features:
+                     dataset = list(map(lambda x: fill_data(x, key, default_val), dataset))
+
+         return dataset
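+
+ # Usage sketch (illustrative; assumes this script and the `seacrowd` helpers are
+ # importable locally; recent `datasets` versions may also need trust_remote_code=True):
+ #   import datasets
+ #   dset = datasets.load_dataset("ud.py", name="ud_id_gsd_seacrowd_seq_label")
+ #   print(dset["train"][0]["tokens"], dset["train"][0]["labels"])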