model added
Browse files
- *global*.pure_model +3 -0
- README.md +125 -3
- added_tokens.json +1 -0
- config.json +33 -0
- special_tokens_map.json +1 -0
- spiece.model +3 -0
- spiece.vocab +0 -0
- tokenizer_config.json +1 -0
*global*.pure_model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dc6f4d6e29c4f98bd76ca70d4fc8a30ccd9a15685a2a2a021bafd44435f26589
size 976426545
README.md
CHANGED
@@ -1,3 +1,125 @@
IndicBARTSS is a multilingual, sequence-to-sequence pre-trained model focusing on Indic languages and English. It currently supports 11 Indian languages and is based on the mBART architecture. You can use the IndicBARTSS model to build natural language generation applications for Indian languages by fine-tuning it with supervised training data for tasks such as machine translation, summarization, and question generation. Some salient features of IndicBARTSS are:

<ul>
<li>Supported languages: Assamese, Bengali, Gujarati, Hindi, Marathi, Odia, Punjabi, Kannada, Malayalam, Tamil, Telugu and English. Not all of these languages are supported by mBART50 and mT5.</li>
<li>The model is much smaller than the mBART and mT5(-base) models, so it is less computationally expensive for fine-tuning and decoding.</li>
<li>Trained on large Indic language corpora (452 million sentences and 9 billion tokens), which also include Indian English content.</li>
<li>Unlike ai4bharat/IndicBART, each language is written in its own script, so you do not need to perform any script mapping to/from Devanagari.</li>
</ul>

You can read more about IndicBARTSS in this <a href="https://arxiv.org/abs/2109.02903">paper</a>.

For detailed documentation, look here: https://github.com/AI4Bharat/indic-bart/ and https://indicnlp.ai4bharat.org/indic-bart/

# Pre-training corpus

We used the <a href="https://indicnlp.ai4bharat.org/corpora/">IndicCorp</a> data spanning 12 languages with 452 million sentences (9 billion tokens). The model was trained using the text-infilling objective used in mBART.

# Usage:

```
from transformers import MBartForConditionalGeneration, AutoModelForSeq2SeqLM
from transformers import AlbertTokenizer, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ai4bharat/IndicBARTSS", do_lower_case=False, use_fast=False, keep_accents=True)

# Or use tokenizer = AlbertTokenizer.from_pretrained("ai4bharat/IndicBARTSS", do_lower_case=False, use_fast=False, keep_accents=True)

model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/IndicBARTSS")

# Or use model = MBartForConditionalGeneration.from_pretrained("ai4bharat/IndicBARTSS")

# Some initial mapping
bos_id = tokenizer._convert_token_to_id_with_added_voc("<s>")
eos_id = tokenizer._convert_token_to_id_with_added_voc("</s>")
pad_id = tokenizer._convert_token_to_id_with_added_voc("<pad>")
# To get a lang_id use any of ['<2as>', '<2bn>', '<2en>', '<2gu>', '<2hi>', '<2kn>', '<2ml>', '<2mr>', '<2or>', '<2pa>', '<2ta>', '<2te>']

# First tokenize the input and outputs. The format below is how IndicBARTSS was trained, so the input should be "Sentence </s> <2xx>" where xx is the language code. Similarly, the output should be "<2yy> Sentence </s>".
inp = tokenizer("I am a boy </s> <2en>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids  # tensor([[ 466, 1981, 80, 25573, 64001, 64004]])

out = tokenizer("<2hi> मैं एक लड़का हूँ </s>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids  # tensor([[64006, 942, 43, 32720, 8384, 64001]])

model_outputs = model(input_ids=inp, decoder_input_ids=out[:, 0:-1], labels=out[:, 1:])

# For loss
model_outputs.loss  # This is not label smoothed.
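
# The following lines are an editorial sketch, not part of the original card:
# if you do want a label-smoothed loss, you can recompute it from the logits
# with torch.nn.functional.cross_entropy (assumes PyTorch >= 1.10; the
# smoothing value 0.1 is an arbitrary placeholder).
import torch.nn.functional as F
smoothed_loss = F.cross_entropy(
    model_outputs.logits.reshape(-1, model_outputs.logits.size(-1)),
    out[:, 1:].reshape(-1),
    ignore_index=pad_id,
    label_smoothing=0.1,
)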

# For logits
model_outputs.logits

# For generation. Pardon the messiness. Note the decoder_start_token_id.

model.eval()  # Set dropouts to zero

model_output = model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))

# Decode to get output strings
decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(decoded_output)  # I am a boy

# What if we mask?

inp = tokenizer("I am [MASK] </s> <2en>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids

model_output = model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))

decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(decoded_output)  # I am happy

inp = tokenizer("मैं [MASK] हूँ </s> <2hi>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids

model_output = model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))

decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(decoded_output)  # मैं जानता हूँ

inp = tokenizer("मला [MASK] पाहिजे </s> <2mr>", add_special_tokens=False, return_tensors="pt", padding=True).input_ids

model_output = model.generate(inp, use_cache=True, num_beams=4, max_length=20, min_length=1, early_stopping=True, pad_token_id=pad_id, bos_token_id=bos_id, eos_token_id=eos_id, decoder_start_token_id=tokenizer._convert_token_to_id_with_added_voc("<2en>"))

decoded_output = tokenizer.decode(model_output[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)

print(decoded_output)  # मला ओळखलं पाहिजे

```

# Notes:
1. This is compatible with the latest version of transformers, but it was developed with version 4.3.2, so consider using 4.3.2 if possible.
2. While I have only shown how to get logits and loss and how to generate outputs, you can do pretty much everything the MBartForConditionalGeneration class can do, as described in https://huggingface.co/docs/transformers/model_doc/mbart#transformers.MBartForConditionalGeneration
3. Note that the tokenizer I have used is based on sentencepiece and not BPE. Therefore, I used the AlbertTokenizer class and not the MBartTokenizer class. A quick way to confirm this is sketched below.
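
As a quick editorial sanity check (not part of the original card), you can confirm which tokenizer class AutoTokenizer resolves to for this checkpoint; the exact class name printed may vary with the transformers version you have installed.

```
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("ai4bharat/IndicBARTSS", do_lower_case=False, use_fast=False, keep_accents=True)
# Expected to be the sentencepiece-based AlbertTokenizer, not MBartTokenizer.
print(type(tok).__name__)
```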
# Fine-tuning on a downstream task

1. If you wish to fine-tune this model, you can do so using the <a href="https://github.com/prajdabre/yanmtt">YANMTT</a> toolkit, following the instructions <a href="https://github.com/AI4Bharat/indic-bart">here</a>.
2. (Untested) Alternatively, you may use the official huggingface scripts for <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/translation">translation</a> and <a href="https://github.com/huggingface/transformers/tree/master/examples/pytorch/summarization">summarization</a>. A minimal plain-PyTorch sketch is also shown below.
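
For orientation only, here is a minimal, untested editorial sketch (not from the original card) of a plain PyTorch fine-tuning loop that reuses the "Sentence </s> <2xx>" / "<2yy> Sentence </s>" layout from the usage example above; the toy sentence pair, optimizer choice, learning rate and epoch count are illustrative placeholders, not recommended settings.

```
import torch
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("ai4bharat/IndicBARTSS", do_lower_case=False, use_fast=False, keep_accents=True)
model = AutoModelForSeq2SeqLM.from_pretrained("ai4bharat/IndicBARTSS")
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4)

# Toy parallel data in the format IndicBARTSS expects.
pairs = [("I am a boy </s> <2en>", "<2hi> मैं एक लड़का हूँ </s>")]

model.train()
for epoch in range(3):
    for src, tgt in pairs:
        inp = tokenizer(src, add_special_tokens=False, return_tensors="pt").input_ids
        out = tokenizer(tgt, add_special_tokens=False, return_tensors="pt").input_ids
        # Teacher forcing: decoder input is "<2yy> Sentence", labels are "Sentence </s>".
        loss = model(input_ids=inp, decoder_input_ids=out[:, :-1], labels=out[:, 1:]).loss
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()
```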

# Contributors
<ul>
<li> Raj Dabre </li>
<li> Himani Shrotriya </li>
<li> Anoop Kunchukuttan </li>
<li> Ratish Puduppully </li>
<li> Mitesh M. Khapra </li>
<li> Pratyush Kumar </li>
</ul>

# Paper
If you use IndicBARTSS, please cite the following paper:
```
@misc{dabre2021indicbart,
      title={IndicBART: A Pre-trained Model for Natural Language Generation of Indic Languages},
      author={Raj Dabre and Himani Shrotriya and Anoop Kunchukuttan and Ratish Puduppully and Mitesh M. Khapra and Pratyush Kumar},
      year={2021},
      eprint={2109.02903},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
```

# License
The model is available under the MIT License.
added_tokens.json
ADDED
@@ -0,0 +1 @@
{"<s>": 64000, "</s>": 64001, "<2shuf>": 64002, "<2as>": 64003, "<2bn>": 64004, "<2en>": 64005, "<2gu>": 64006, "<2hi>": 64007, "<2kn>": 64008, "<2ml>": 64009, "<2mr>": 64010, "<2or>": 64011, "<2pa>": 64012, "<2ta>": 64013, "<2te>": 64014}
config.json
ADDED
@@ -0,0 +1,33 @@
{
  "activation_dropout": 0.1,
  "activation_function": "gelu",
  "architectures": [
    "MBartForConditionalGeneration"
  ],
  "attention_dropout": 0.1,
  "bos_token_id": 64000,
  "d_model": 1024,
  "classifier_dropout": 0.0,
  "decoder_attention_heads": 16,
  "decoder_ffn_dim": 4096,
  "decoder_layerdrop": 0.0,
  "decoder_layers": 6,
  "dropout": 0.1,
  "encoder_attention_heads": 16,
  "encoder_ffn_dim": 4096,
  "encoder_layerdrop": 0.0,
  "encoder_layers": 6,
  "eos_token_id": 64001,
  "gradient_checkpointing": false,
  "init_std": 0.02,
  "is_encoder_decoder": true,
  "max_position_embeddings": 1024,
  "model_type": "mbart",
  "num_hidden_layers": 6,
  "pad_token_id": 0,
  "scale_embedding": false,
  "transformers_version": "4.3.2",
  "use_cache": true,
  "vocab_size": 64015,
  "tokenizer_class": "AlbertTokenizer"
}
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
{"bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}, "additional_special_tokens": ["<s>", "</s>", "<2as>", "<2bn>", "<2en>", "<2gu>", "<2hi>", "<2kn>", "<2ml>", "<2mr>", "<2or>", "<2pa>", "<2ta>", "<2te>"]}
spiece.model
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:27d922f1b9444ae05eab57f3f0a9d60d4147e905a160f07c3b08116e7b3b8c6a
size 1803730
spiece.vocab
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
{"do_lower_case": false, "remove_space": true, "keep_accents": true, "bos_token": "[CLS]", "eos_token": "[SEP]", "unk_token": "<unk>", "sep_token": "[SEP]", "pad_token": "<pad>", "cls_token": "[CLS]", "mask_token": {"content": "[MASK]", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "use_fast": false, "special_tokens_map_file": "albert-indic64k/special_tokens_map.json", "tokenizer_file": null, "name_or_path": "albert-indic64k"}