vesteinn committed on
Commit
f05964f
1 Parent(s): 7be77d5

Model copied from huggingface.com/vesteinn

README.md ADDED
@@ -0,0 +1,28 @@
+ ---
+ language: is
+ widget:
+ - text: Má bjóða þér <mask> í kvöld?
+ - text: Forseti <mask> er ágæt.
+ - text: Súpan var <mask> á bragðið.
+ tags:
+ - roberta
+ - icelandic
+ - masked-lm
+ - pytorch
+ license: agpl-3.0
+ ---
+
+ # IceBERT
+
+ IceBERT was trained with fairseq using the RoBERTa-base architecture. The training data is shown in the table below.
+
+ | Dataset                                              | Size    | Tokens |
+ |------------------------------------------------------|---------|--------|
+ | Icelandic Gigaword Corpus v20.05 (IGC)               | 8.2 GB  | 1,388M |
+ | Icelandic Common Crawl Corpus (IC3)                  | 4.9 GB  | 824M   |
+ | Greynir News articles                                | 456 MB  | 76M    |
+ | Icelandic Sagas                                      | 9 MB    | 1.7M   |
+ | Open Icelandic e-books (Rafbókavefurinn)             | 14 MB   | 2.6M   |
+ | Data from the medical library of Landspitali         | 33 MB   | 5.2M   |
+ | Student theses from Icelandic universities (Skemman) | 2.2 GB  | 367M   |
+ | Total                                                | 15.8 GB | 2,664M |
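
Since the card tags this as a PyTorch masked language model, the widget prompts above can be reproduced locally with the `fill-mask` pipeline from Hugging Face Transformers. A minimal sketch, assuming the repository id is `vesteinn/IceBERT` (inferred from the commit message, not stated in the diff):

```python
from transformers import pipeline

# Repo id "vesteinn/IceBERT" is an assumption based on the commit message
# ("Model copied from huggingface.com/vesteinn"); adjust if the hub path differs.
fill_mask = pipeline("fill-mask", model="vesteinn/IceBERT")

# One of the widget prompts from the model card above
for pred in fill_mask("Má bjóða þér <mask> í kvöld?", top_k=3):
    print(f"{pred['token_str']!r}  score={pred['score']:.3f}")
```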
config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "architectures": [
+     "RobertaForMaskedLM"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "bos_token_id": 0,
+   "eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "max_position_embeddings": 514,
+   "model_type": "roberta",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "type_vocab_size": 1,
+   "vocab_size": 50000
+ }
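
The config describes a standard RoBERTa-base shape: 12 layers, 12 attention heads, 768 hidden units, and a 50,000-token vocabulary. A small sketch of inspecting those fields with Transformers' `AutoConfig`, again assuming the hypothetical `vesteinn/IceBERT` repo id:

```python
from transformers import AutoConfig

# The repo id is assumed, as above; pointing from_pretrained at a local
# directory containing this config.json works identically.
config = AutoConfig.from_pretrained("vesteinn/IceBERT")

print(config.model_type)         # roberta
print(config.num_hidden_layers)  # 12
print(config.hidden_size)        # 768
print(config.vocab_size)         # 50000
# 514 = 512 usable positions plus RoBERTa's two-position padding offset
print(config.max_position_embeddings)
```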
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9edb9a935877ed5c1fec314ab84b41db258ae0586416f5c2902b73413cacc3b6
+ size 651825533
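
The weights are stored via Git LFS, so the three lines above are only a pointer: the spec version, the SHA-256 of the real blob, and its size in bytes (about 652 MB). A sketch of verifying a downloaded checkpoint against that pointer, assuming it sits at `./pytorch_model.bin`:

```python
import hashlib

# Expected values copied from the LFS pointer above
EXPECTED_OID = "9edb9a935877ed5c1fec314ab84b41db258ae0586416f5c2902b73413cacc3b6"
EXPECTED_SIZE = 651_825_533

sha = hashlib.sha256()
size = 0
with open("pytorch_model.bin", "rb") as f:  # assumed local download path
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, f"size mismatch: {size} bytes"
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("pytorch_model.bin matches the LFS pointer")
```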
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}}
vocab.json ADDED
The diff for this file is too large to render. See raw diff