SushantGautam committed
Commit 5df7fd1
Parent: ddf0f18

Training in progress, step 500

.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zstandard filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+tokenizer.json filter=lfs diff=lfs merge=lfs -text
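The added rule makes Git LFS store tokenizer.json as a small pointer file rather than the full blob. A minimal sketch, in plain Python, of checking whether a checked-out file is still an unsmudged pointer; it keys off the pointer header visible in the binary-file diffs below:

def is_lfs_pointer(path):
    # LFS pointers begin with the spec line shown in pytorch_model.bin's diff.
    try:
        with open(path, "rb") as f:
            return f.readline().startswith(b"version https://git-lfs.github.com/spec/v1")
    except OSError:
        return False

print(is_lfs_pointer("tokenizer.json"))  # True until the blob is fetched with `git lfs pull`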
config.json CHANGED
@@ -1,10 +1,27 @@
 {
-  "_name_or_path": "bert-base-cased",
+  "_name_or_path": "danielhou13/longformer-finetuned_papers",
   "architectures": [
-    "BertForSequenceClassification"
+    "LongformerForSequenceClassification"
   ],
+  "attention_mode": "longformer",
   "attention_probs_dropout_prob": 0.1,
+  "attention_window": [
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512,
+    512
+  ],
+  "bos_token_id": 0,
   "classifier_dropout": null,
+  "eos_token_id": 2,
   "gradient_checkpointing": false,
   "hidden_act": "gelu",
   "hidden_dropout_prob": 0.1,
@@ -13,23 +30,25 @@
     "0": 0,
     "1": 1
   },
+  "ignore_attention_mask": false,
   "initializer_range": 0.02,
   "intermediate_size": 3072,
   "label2id": {
     "0": 0,
     "1": 1
   },
-  "layer_norm_eps": 1e-12,
-  "max_position_embeddings": 512,
-  "model_type": "bert",
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 4098,
+  "model_type": "longformer",
   "num_attention_heads": 12,
   "num_hidden_layers": 12,
-  "pad_token_id": 0,
+  "pad_token_id": 1,
   "position_embedding_type": "absolute",
   "problem_type": "single_label_classification",
+  "sep_token_id": 2,
   "torch_dtype": "float32",
   "transformers_version": "4.21.0.dev0",
-  "type_vocab_size": 2,
+  "type_vocab_size": 1,
   "use_cache": true,
-  "vocab_size": 28996
+  "vocab_size": 50265
 }
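The config swap replaces the bert-base-cased classifier with a Longformer one: 12 layers with a 512-token attention window each, 4,098 positions, and the 50,265-entry RoBERTa-style vocabulary. A minimal sketch of rebuilding the model from this file with transformers (the local file path is an assumption):

from transformers import LongformerConfig, LongformerForSequenceClassification

config = LongformerConfig.from_json_file("config.json")  # path assumed
print(config.model_type, config.max_position_embeddings)  # longformer 4098

# Architecture only; use from_pretrained(checkpoint_dir) to also load pytorch_model.bin.
model = LongformerForSequenceClassification(config)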
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cade85ada38037cb76df1b4494cbf65ff4dfa4ffc508bb73bde409d3c679505b
-size 433321105
+oid sha256:1f15b40c73a175c2a56798129b340dd32d71f87b64e59da6e7f6127bd649bffd
+size 594740970
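The pointer swap grows the checkpoint from ~433 MB (BERT-base) to ~595 MB (Longformer-base). A Git LFS oid is a plain SHA-256 of the file contents, so a downloaded pytorch_model.bin can be checked against the pointer with a short sketch:

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks to avoid loading ~595 MB at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Expected: 1f15b40c73a175c2a56798129b340dd32d71f87b64e59da6e7f6127bd649bffd
print(sha256_of("pytorch_model.bin"))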
sentencepiece.bpe.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cfc8146abe2a0488e9e2a0c56de7952f7c11ab059eca145a0a727afce0db2865
+size 5069051
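sentencepiece.bpe.model is the SentencePiece model that MBart50Tokenizer (declared in tokenizer_config.json below) wraps. Assuming the blob has been pulled from LFS, a quick sanity check with the sentencepiece package:

import sentencepiece as spm

sp = spm.SentencePieceProcessor(model_file="sentencepiece.bpe.model")
print(sp.vocab_size())                                   # SentencePiece pieces only
print(sp.encode("Training in progress", out_type=str))   # subword pieces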
special_tokens_map.json CHANGED
@@ -1,7 +1,63 @@
 {
-  "cls_token": "[CLS]",
-  "mask_token": "[MASK]",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "unk_token": "[UNK]"
+  "additional_special_tokens": [
+    "ar_AR",
+    "cs_CZ",
+    "de_DE",
+    "en_XX",
+    "es_XX",
+    "et_EE",
+    "fi_FI",
+    "fr_XX",
+    "gu_IN",
+    "hi_IN",
+    "it_IT",
+    "ja_XX",
+    "kk_KZ",
+    "ko_KR",
+    "lt_LT",
+    "lv_LV",
+    "my_MM",
+    "ne_NP",
+    "nl_XX",
+    "ro_RO",
+    "ru_RU",
+    "si_LK",
+    "tr_TR",
+    "vi_VN",
+    "zh_CN",
+    "af_ZA",
+    "az_AZ",
+    "bn_IN",
+    "fa_IR",
+    "he_IL",
+    "hr_HR",
+    "id_ID",
+    "ka_GE",
+    "km_KH",
+    "mk_MK",
+    "ml_IN",
+    "mn_MN",
+    "mr_IN",
+    "pl_PL",
+    "ps_AF",
+    "pt_XX",
+    "sv_SE",
+    "sw_KE",
+    "ta_IN",
+    "te_IN",
+    "th_TH",
+    "tl_XX",
+    "uk_UA",
+    "ur_PK",
+    "xh_ZA",
+    "gl_ES",
+    "sl_SI"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": "<mask>",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "unk_token": "<unk>"
 }
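The map drops BERT's [CLS]/[SEP]/[PAD] convention for the <s>/</s>/<pad> tokens used by mBART-50, plus its 52 language-code tokens. A trivial check of the file as written:

import json

with open("special_tokens_map.json") as f:
    tokens = json.load(f)

print(tokens["cls_token"], tokens["sep_token"])   # <s> </s>
print(len(tokens["additional_special_tokens"]))   # 52 language codes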
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json CHANGED
@@ -1,14 +1,77 @@
 {
-  "cls_token": "[CLS]",
-  "do_lower_case": false,
-  "mask_token": "[MASK]",
-  "model_max_length": 512,
-  "name_or_path": "bert-base-cased",
-  "pad_token": "[PAD]",
-  "sep_token": "[SEP]",
-  "special_tokens_map_file": null,
-  "strip_accents": null,
-  "tokenize_chinese_chars": true,
-  "tokenizer_class": "BertTokenizer",
-  "unk_token": "[UNK]"
+  "additional_special_tokens": [
+    "ar_AR",
+    "cs_CZ",
+    "de_DE",
+    "en_XX",
+    "es_XX",
+    "et_EE",
+    "fi_FI",
+    "fr_XX",
+    "gu_IN",
+    "hi_IN",
+    "it_IT",
+    "ja_XX",
+    "kk_KZ",
+    "ko_KR",
+    "lt_LT",
+    "lv_LV",
+    "my_MM",
+    "ne_NP",
+    "nl_XX",
+    "ro_RO",
+    "ru_RU",
+    "si_LK",
+    "tr_TR",
+    "vi_VN",
+    "zh_CN",
+    "af_ZA",
+    "az_AZ",
+    "bn_IN",
+    "fa_IR",
+    "he_IL",
+    "hr_HR",
+    "id_ID",
+    "ka_GE",
+    "km_KH",
+    "mk_MK",
+    "ml_IN",
+    "mn_MN",
+    "mr_IN",
+    "pl_PL",
+    "ps_AF",
+    "pt_XX",
+    "sv_SE",
+    "sw_KE",
+    "ta_IN",
+    "te_IN",
+    "th_TH",
+    "tl_XX",
+    "uk_UA",
+    "ur_PK",
+    "xh_ZA",
+    "gl_ES",
+    "sl_SI"
+  ],
+  "bos_token": "<s>",
+  "cls_token": "<s>",
+  "eos_token": "</s>",
+  "mask_token": {
+    "__type": "AddedToken",
+    "content": "<mask>",
+    "lstrip": true,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "model_max_length": 1024,
+  "name_or_path": "facebook/mbart-large-50",
+  "pad_token": "<pad>",
+  "sep_token": "</s>",
+  "sp_model_kwargs": {},
+  "special_tokens_map_file": "/home/suraj/projects/mbart-50/hf_models/mbart-50-large/special_tokens_map.json",
+  "src_lang": null,
+  "tgt_lang": null,
+  "tokenizer_class": "MBart50Tokenizer",
+  "unk_token": "<unk>"
 }
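The tokenizer now comes from facebook/mbart-large-50 via MBart50Tokenizer, with model_max_length 1024; src_lang and tgt_lang are null on disk, so a language code has to be supplied when loading. A minimal sketch (loading from the local checkout directory is an assumption):

from transformers import MBart50Tokenizer

# "." stands for this repo's local checkout; per name_or_path above,
# "facebook/mbart-large-50" is where these tokenizer files originated.
tok = MBart50Tokenizer.from_pretrained(".", src_lang="en_XX")
ids = tok("Training in progress")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # begins with the en_XX language token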
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d96ba03212dac3447b6ff94f6988a9690d494a6aa62ae271c899a2c2fc3ac221
+oid sha256:1cdd9f11ec0611e7c1667b6b3576b8b5182e15a55bb5d5d4e75a66f5ae39651d
 size 3311
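training_args.bin is the pickled TrainingArguments that the transformers Trainer writes next to each checkpoint; its contents changed but the size stayed at 3311 bytes. A hedged sketch for inspecting it:

import torch

# Unpickling needs transformers importable; on recent torch versions the
# weights_only=False flag may be required for non-tensor pickles like this.
args = torch.load("training_args.bin", weights_only=False)
print(args.num_train_epochs, args.learning_rate, args.save_steps)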