monsoon-nlp committed
Commit b322555
1 Parent(s): d890fb5
initial commit
Browse files:
- README.md +29 -0
- config.json +23 -0
- pytorch_model.bin +3 -0
- training_args.bin +3 -0
- vocab.txt +0 -0
README.md
ADDED
@@ -0,0 +1,29 @@
---
language: dv
---

# dv-labse

This is an experiment in cross-lingual transfer learning, to insert Dhivehi word and
word-piece tokens into Google's LaBSE model.

- Original model weights: https://huggingface.co/setu4993/LaBSE
- Original model announcement: https://ai.googleblog.com/2020/08/language-agnostic-bert-sentence.html

This currently outperforms dv-wave and dv-MuRIL (a similar transfer-learning model) on
the Maldivian News Classification task: https://github.com/Sofwath/DhivehiDatasets

- mBERT: 52%
- dv-wave (ELECTRA): 89%
- dv-muril: 90.7%
- dv-labse: 91.5% (may continue training)

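As a rough illustration of how such a classifier could be built on top of this checkpoint, the sketch below loads the model with a freshly initialized classification head; the hub id `monsoon-nlp/dv-labse` and the 8-label setup are assumptions, not details given in this commit.

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Hypothetical hub id and label count -- adjust to the actual repo and dataset.
MODEL_ID = "monsoon-nlp/dv-labse"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_ID, num_labels=8)

# Tokenize a batch of Dhivehi (Thaana-script) headlines; the classification head
# is newly initialized here and still needs fine-tuning on the labeled news data.
batch = tokenizer(["<Dhivehi headline here>"], padding=True, truncation=True,
                  max_length=512, return_tensors="pt")
logits = model(**batch).logits
```
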
## Training

- Start with LaBSE (similar to mBERT) with no Thaana vocabulary
- Based on PanLex dictionaries, attach 1,100 Dhivehi words to Sinhalese or English embeddings
- Add remaining words and word-pieces from dv-wave's vocabulary to vocab.txt
- Continue BERT pretraining on Dhivehi text

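A minimal, non-authoritative sketch of the vocabulary-extension steps in the list above: it uses `add_tokens` plus `resize_token_embeddings` as a stand-in for editing vocab.txt directly, and `dv_to_anchor` is a hypothetical dictionary built from the PanLex word pairs.

```python
import torch
from transformers import BertForMaskedLM, BertTokenizerFast

# Start from the original LaBSE checkpoint (no Thaana tokens yet).
tokenizer = BertTokenizerFast.from_pretrained("setu4993/LaBSE")
model = BertForMaskedLM.from_pretrained("setu4993/LaBSE")

# Hypothetical PanLex-derived pairs: Dhivehi word -> existing Sinhala/English anchor.
dv_to_anchor = {"ރާއްޖެ": "Maldives"}
extra_wordpieces = ["##ގެ"]  # placeholder for the dv-wave word-pieces

tokenizer.add_tokens(list(dv_to_anchor) + extra_wordpieces)
model.resize_token_embeddings(len(tokenizer))  # new rows start randomly initialized

# Initialize each mapped Dhivehi word from its anchor's (averaged) embedding row(s).
embeddings = model.get_input_embeddings().weight
with torch.no_grad():
    for dv_word, anchor in dv_to_anchor.items():
        dv_id = tokenizer.convert_tokens_to_ids(dv_word)
        anchor_ids = tokenizer(anchor, add_special_tokens=False)["input_ids"]
        embeddings[dv_id] = embeddings[anchor_ids].mean(dim=0)
```

Words without a PanLex anchor keep their random initialization and rely on the continued pretraining step.
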
Colab notebook:
https://colab.research.google.com/drive/1CUn44M2fb4Qbat2pAvjYqsPvWLt1Novi
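
The continued masked-LM pretraining itself happens in the notebook; a generic sketch of that step, assuming the model is published as `monsoon-nlp/dv-labse` and the corpus path `dv_corpus.txt` is a placeholder, might look like:

```python
from datasets import load_dataset
from transformers import (BertForMaskedLM, BertTokenizerFast,
                          DataCollatorForLanguageModeling, Trainer, TrainingArguments)

tokenizer = BertTokenizerFast.from_pretrained("monsoon-nlp/dv-labse")  # assumed hub id
model = BertForMaskedLM.from_pretrained("monsoon-nlp/dv-labse")

# Plain-text Dhivehi corpus, one sentence or document per line (hypothetical file).
dataset = load_dataset("text", data_files={"train": "dv_corpus.txt"})["train"]
dataset = dataset.map(
    lambda rows: tokenizer(rows["text"], truncation=True, max_length=512),
    batched=True, remove_columns=["text"])

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="dv-labse-mlm",
                           per_device_train_batch_size=8,
                           num_train_epochs=1),
    data_collator=DataCollatorForLanguageModeling(tokenizer, mlm_probability=0.15),
    train_dataset=dataset,
)
trainer.train()
```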
config.json
ADDED
@@ -0,0 +1,23 @@
{
  "architectures": [
    "BertForMaskedLM"
  ],
  "attention_probs_dropout_prob": 0.1,
  "gradient_checkpointing": false,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 768,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "layer_norm_eps": 1e-12,
  "max_position_embeddings": 512,
  "model_type": "bert",
  "num_attention_heads": 12,
  "num_hidden_layers": 12,
  "pad_token_id": 0,
  "position_embedding_type": "absolute",
  "transformers_version": "4.3.0",
  "type_vocab_size": 2,
  "use_cache": true,
  "vocab_size": 528954
}
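To sanity-check this configuration against the weights, one could load it locally (assuming the file is saved as config.json); note the enlarged `vocab_size` of 528,954, which reflects the added Dhivehi words and word-pieces.

```python
from transformers import BertConfig, BertForMaskedLM

config = BertConfig.from_json_file("config.json")
print(config.vocab_size)  # 528954 -- LaBSE's vocabulary plus the added Dhivehi tokens

# A model built from the config alone is randomly initialized, but its embedding
# matrix already has the shape the checkpoint in pytorch_model.bin expects.
model = BertForMaskedLM(config)
print(model.get_input_embeddings().weight.shape)  # torch.Size([528954, 768])
```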
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e83b53c49ec3cc36f160bd0cef7dcb5cd96f153286358b7e69430341105dfa6
size 1971321682
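This is a Git LFS pointer rather than the weights themselves; the `oid` is the SHA-256 of the real file and `size` is its byte count, so a local download can be verified with something like the sketch below (the file path is an assumption).

```python
import hashlib, os

PATH = "pytorch_model.bin"  # assumed local download path
sha = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == "8e83b53c49ec3cc36f160bd0cef7dcb5cd96f153286358b7e69430341105dfa6"
assert os.path.getsize(PATH) == 1971321682
```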
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:569bc9c6c7fcdaf89d336c1f63940b2c559b12d971b7055c8f4ce06ed71bce40
size 2159
vocab.txt
ADDED
The diff for this file is too large to render.
See raw diff