model
- config.json +37 -0
- merges.txt +0 -0
- pytorch_model.bin +3 -0
- special_tokens_map.json +1 -0
- tokenizer.json +0 -0
- tokenizer_config.json +1 -0
- trainer_state.json +110 -0
- training_args.bin +3 -0
- vocab.json +0 -0
config.json
ADDED
@@ -0,0 +1,37 @@
+{
+  "_name_or_path": "/gpfs/projects/bsc88/projects/catalan_evaluation/models/roberta-large-ca-v2",
+  "architectures": [
+    "RobertaForSequenceClassification"
+  ],
+  "attention_probs_dropout_prob": 0.1,
+  "bos_token_id": 0,
+  "classifier_dropout": null,
+  "eos_token_id": 2,
+  "finetuning_task": "paraf",
+  "hidden_act": "gelu",
+  "hidden_dropout_prob": 0.1,
+  "hidden_size": 1024,
+  "id2label": {
+    "0": "No Parafrasis",
+    "1": "Parafrasis"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 4096,
+  "label2id": {
+    "No Parafrasis": 0,
+    "Parafrasis": 1
+  },
+  "layer_norm_eps": 1e-05,
+  "max_position_embeddings": 514,
+  "model_type": "roberta",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 24,
+  "pad_token_id": 1,
+  "position_embedding_type": "absolute",
+  "problem_type": "single_label_classification",
+  "torch_dtype": "float32",
+  "transformers_version": "4.17.0",
+  "type_vocab_size": 1,
+  "use_cache": true,
+  "vocab_size": 50262
+}
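
config.json defines a RobertaForSequenceClassification head (24 layers, hidden size 1024, 16 attention heads) fine-tuned on the "paraf" task, with id2label mapping 0 to "No Parafrasis" and 1 to "Parafrasis" (no paraphrase / paraphrase). A minimal usage sketch, assuming transformers >= 4.17.0 and a local clone of this repository; the directory path and the Catalan sentence pair are illustrative, not part of this commit:

    # Minimal sketch: load the fine-tuned paraphrase classifier from a local clone.
    import torch
    from transformers import AutoModelForSequenceClassification, AutoTokenizer

    model_dir = "./roberta-large-ca-paraphrase"  # hypothetical local path
    tokenizer = AutoTokenizer.from_pretrained(model_dir)
    model = AutoModelForSequenceClassification.from_pretrained(model_dir)

    # Paraphrase detection scores a sentence pair; this pair is illustrative only.
    inputs = tokenizer(
        "El gat dorm al sofà.",
        "El gat està dormint al sofà.",
        return_tensors="pt",
        truncation=True,
        max_length=512,
    )
    with torch.no_grad():
        logits = model(**inputs).logits

    # id2label from config.json: 0 -> "No Parafrasis", 1 -> "Parafrasis"
    print(model.config.id2label[logits.argmax(dim=-1).item()])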
merges.txt
ADDED
The diff for this file is too large to render.
See raw diff
pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:49a4b62f251baff1449d840251b3bd5691c06f1b83570d030c5063a2bb1289a2
+size 1421600941
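
pytorch_model.bin is tracked with Git LFS, so the diff shows only the three-line pointer file (spec version, SHA-256 oid, size in bytes); the size field puts the checkpoint at roughly 1.4 GB. A hedged sketch of fetching the actual weights with huggingface_hub; the repo_id is a placeholder, since this diff does not name the repository:

    # Sketch: resolve the LFS pointer to the real weights (repo_id is hypothetical).
    from huggingface_hub import hf_hub_download

    weights_path = hf_hub_download(
        repo_id="<namespace>/<model-name>",  # placeholder; not stated in this diff
        filename="pytorch_model.bin",
    )
    print(weights_path)  # local cache path of the ~1.4 GB file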
special_tokens_map.json
ADDED
@@ -0,0 +1 @@
+{"bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true}}
tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
tokenizer_config.json
ADDED
@@ -0,0 +1 @@
+{"errors": "replace", "bos_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "eos_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "sep_token": {"content": "</s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "cls_token": {"content": "<s>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "unk_token": {"content": "<unk>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "pad_token": {"content": "<pad>", "single_word": false, "lstrip": false, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": true, "__type": "AddedToken"}, "add_prefix_space": true, "trim_offsets": true, "max_len": 512, "special_tokens_map_file": null, "name_or_path": "/gpfs/projects/bsc88/projects/catalan_evaluation/models/roberta-large-ca-v2", "tokenizer_class": "RobertaTokenizer"}
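
tokenizer_config.json registers the standard RoBERTa special tokens as AddedToken entries, mirroring special_tokens_map.json above (note "lstrip": true on <mask> only), and sets add_prefix_space, trim_offsets, and max_len 512. A small sketch, assuming a local clone at a hypothetical path, that loads the tokenizer and inspects these settings:

    # Sketch: load the tokenizer defined by vocab.json, merges.txt and this config.
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("./roberta-large-ca-paraphrase")  # hypothetical path
    print(tok.cls_token, tok.sep_token, tok.pad_token, tok.mask_token)  # <s> </s> <pad> <mask>
    print(tok.model_max_length)  # 512, taken from the legacy "max_len" key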
trainer_state.json
ADDED
@@ -0,0 +1,110 @@
+{
+  "best_metric": 0.8438365477338476,
+  "best_model_checkpoint": "output/roberta-large-ca-v2/parafraseja.py_8_0.00005_date_22-10-14_time_16-03-07/checkpoint-4995",
+  "epoch": 5.0,
+  "global_step": 4995,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 1.0,
+      "learning_rate": 4e-05,
+      "loss": 0.5335,
+      "step": 999
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.836,
+      "eval_combined_score": 0.8360819180819181,
+      "eval_f1": 0.8361638361638362,
+      "eval_loss": 0.39040425419807434,
+      "eval_runtime": 58.4758,
+      "eval_samples_per_second": 34.202,
+      "eval_steps_per_second": 2.138,
+      "step": 999
+    },
+    {
+      "epoch": 2.0,
+      "learning_rate": 3e-05,
+      "loss": 0.3403,
+      "step": 1998
+    },
+    {
+      "epoch": 2.0,
+      "eval_accuracy": 0.832,
+      "eval_combined_score": 0.8342083739045765,
+      "eval_f1": 0.8364167478091529,
+      "eval_loss": 0.4153313934803009,
+      "eval_runtime": 58.4308,
+      "eval_samples_per_second": 34.229,
+      "eval_steps_per_second": 2.139,
+      "step": 1998
+    },
+    {
+      "epoch": 3.0,
+      "learning_rate": 2e-05,
+      "loss": 0.2345,
+      "step": 2997
+    },
+    {
+      "epoch": 3.0,
+      "eval_accuracy": 0.8405,
+      "eval_combined_score": 0.8427098930481283,
+      "eval_f1": 0.8449197860962566,
+      "eval_loss": 0.4383120834827423,
+      "eval_runtime": 58.4693,
+      "eval_samples_per_second": 34.206,
+      "eval_steps_per_second": 2.138,
+      "step": 2997
+    },
+    {
+      "epoch": 4.0,
+      "learning_rate": 1e-05,
+      "loss": 0.1644,
+      "step": 3996
+    },
+    {
+      "epoch": 4.0,
+      "eval_accuracy": 0.836,
+      "eval_combined_score": 0.8401272554605888,
+      "eval_f1": 0.8442545109211776,
+      "eval_loss": 0.6573591828346252,
+      "eval_runtime": 58.3674,
+      "eval_samples_per_second": 34.266,
+      "eval_steps_per_second": 2.142,
+      "step": 3996
+    },
+    {
+      "epoch": 5.0,
+      "learning_rate": 0.0,
+      "loss": 0.1097,
+      "step": 4995
+    },
+    {
+      "epoch": 5.0,
+      "eval_accuracy": 0.841,
+      "eval_combined_score": 0.8438365477338476,
+      "eval_f1": 0.8466730954676953,
+      "eval_loss": 0.7086848616600037,
+      "eval_runtime": 58.3516,
+      "eval_samples_per_second": 34.275,
+      "eval_steps_per_second": 2.142,
+      "step": 4995
+    },
+    {
+      "epoch": 5.0,
+      "step": 4995,
+      "total_flos": 7.447995455717376e+16,
+      "train_loss": 0.27648499787629427,
+      "train_runtime": 7079.06,
+      "train_samples_per_second": 11.29,
+      "train_steps_per_second": 0.706
+    }
+  ],
+  "max_steps": 4995,
+  "num_train_epochs": 5,
+  "total_flos": 7.447995455717376e+16,
+  "trial_name": null,
+  "trial_params": null
+}
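
trainer_state.json logs five epochs of 999 steps each, with the learning rate decaying linearly to 0 and the best combined score (0.8438) reached at the final step, 4995; note that eval_loss rises after epoch 1 even as accuracy and F1 improve. A sketch that extracts the per-epoch eval metrics from this log:

    # Sketch: summarize eval metrics recorded in trainer_state.json.
    import json

    with open("trainer_state.json") as f:
        state = json.load(f)

    for entry in state["log_history"]:
        if "eval_accuracy" in entry:  # skip the train-loss entries
            print(
                f"epoch {entry['epoch']:.0f}: "
                f"acc={entry['eval_accuracy']:.4f} "
                f"f1={entry['eval_f1']:.4f} "
                f"loss={entry['eval_loss']:.4f}"
            )

    print("best:", state["best_metric"], "at", state["best_model_checkpoint"])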
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1ed676d440a87f46a5079e293c785b21237a9713d0cff8bae8bac8bdb041c102
+size 3119
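
training_args.bin is the TrainingArguments object that the Trainer pickles alongside its checkpoints, also stored here as a Git LFS pointer. A hedged sketch of recovering the hyperparameters from it; torch.load unpickles arbitrary code, so only do this with a file you trust (newer torch versions may additionally require weights_only=False):

    # Sketch: inspect the pickled TrainingArguments (trusted file only).
    import torch

    args = torch.load("training_args.bin")
    print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)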
vocab.json
ADDED
The diff for this file is too large to render.
See raw diff