raulgdp committed
Commit 2fbc87b
1 Parent(s): 40fbe56

End of training

README.md CHANGED
@@ -1,70 +1,71 @@
- ---
- license: apache-2.0
- base_model: distilbert-base-uncased
- tags:
- - generated_from_trainer
- metrics:
- - accuracy
- - f1
- - precision
- - recall
- model-index:
- - name: Distilbert
-   results: []
- ---
-
- <!-- This model card has been generated automatically according to the information the Trainer had access to. You
- should probably proofread and complete it, then remove this comment. -->
-
- # Distilbert
-
- This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
- It achieves the following results on the evaluation set:
- - Loss: 0.9102
- - Accuracy: 0.7911
- - F1: 0.7368
- - Precision: 0.7554
- - Recall: 0.7192
-
- ## Model description
-
- More information needed
-
- ## Intended uses & limitations
-
- More information needed
-
- ## Training and evaluation data
-
- More information needed
-
- ## Training procedure
-
- ### Training hyperparameters
-
- The following hyperparameters were used during training:
- - learning_rate: 5e-05
- - train_batch_size: 16
- - eval_batch_size: 8
- - seed: 42
- - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- - lr_scheduler_type: linear
- - num_epochs: 5
-
- ### Training results
-
- | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
- |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
- | 0.6349 | 1.0 | 90 | 0.5635 | 0.6908 | 0.5153 | 0.7108 | 0.4041 |
- | 0.3946 | 2.0 | 180 | 0.5372 | 0.7354 | 0.7181 | 0.6335 | 0.8288 |
- | 0.1805 | 3.0 | 270 | 0.6448 | 0.7827 | 0.7234 | 0.75 | 0.6986 |
- | 0.0692 | 4.0 | 360 | 0.8713 | 0.7632 | 0.7478 | 0.6597 | 0.8630 |
- | 0.0391 | 5.0 | 450 | 0.9102 | 0.7911 | 0.7368 | 0.7554 | 0.7192 |
-
-
- ### Framework versions
-
- - Transformers 4.43.0.dev0
- - Pytorch 2.0.1+cu117
- - Datasets 2.19.1
- - Tokenizers 0.19.1

+ ---
+ library_name: transformers
+ license: apache-2.0
+ base_model: distilbert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - accuracy
+ - f1
+ - precision
+ - recall
+ model-index:
+ - name: Distilbert
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # Distilbert
+
+ This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.6418
+ - Accuracy: 0.7992
+ - F1: 0.7411
+ - Precision: 0.7935
+ - Recall: 0.6952
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 32
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | Precision | Recall |
+ |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|:---------:|:------:|
+ | 0.6496 | 1.0 | 32 | 0.5694 | 0.6811 | 0.5888 | 0.6304 | 0.5524 |
+ | 0.4365 | 2.0 | 64 | 0.5541 | 0.7441 | 0.6409 | 0.7632 | 0.5524 |
+ | 0.2431 | 3.0 | 96 | 0.5720 | 0.7795 | 0.7282 | 0.7426 | 0.7143 |
+ | 0.1262 | 4.0 | 128 | 0.5727 | 0.7874 | 0.7429 | 0.7429 | 0.7429 |
+ | 0.0742 | 5.0 | 160 | 0.6418 | 0.7992 | 0.7411 | 0.7935 | 0.6952 |
+
+
+ ### Framework versions
+
+ - Transformers 4.45.1
+ - Pytorch 2.4.0
+ - Datasets 2.20.0
+ - Tokenizers 0.20.0
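
For orientation, the hyperparameters above map onto a `transformers` `Trainer` configuration roughly as follows. This is a minimal sketch rather than the author's actual script: the card calls the training data an unknown dataset, so the two-row `Dataset` below is a dummy stand-in, and `output_dir` is arbitrary. The Adam settings and linear schedule listed in the card are also the `Trainer` defaults.

```python
# Minimal sketch matching the card's hyperparameters (Transformers 4.45.x,
# as pinned in the framework versions). The real dataset is unknown, so
# `raw` is a dummy stand-in purely to make the sketch executable.
from datasets import Dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    Trainer,
    TrainingArguments,
)

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained(
    "distilbert-base-uncased", num_labels=2
)

raw = Dataset.from_dict({"text": ["great movie", "awful movie"], "label": [1, 0]})
tokenized = raw.map(lambda b: tokenizer(b["text"], truncation=True), batched=True)

args = TrainingArguments(
    output_dir="Distilbert",         # arbitrary
    learning_rate=5e-5,              # as reported
    per_device_train_batch_size=32,  # train_batch_size: 32
    per_device_eval_batch_size=16,   # eval_batch_size: 16
    num_train_epochs=5,
    seed=42,
    lr_scheduler_type="linear",      # Trainer default, listed in the card
    eval_strategy="epoch",
)

trainer = Trainer(
    model=model,
    args=args,
    train_dataset=tokenized,
    eval_dataset=tokenized,          # placeholder; the real split is unknown
    tokenizer=tokenizer,
    data_collator=DataCollatorWithPadding(tokenizer),
)
trainer.train()
```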
config.json CHANGED
@@ -1,33 +1,33 @@
- {
-   "_name_or_path": "distilbert-base-uncased",
-   "activation": "gelu",
-   "architectures": [
-     "DistilBertForSequenceClassification"
-   ],
-   "attention_dropout": 0.1,
-   "dim": 768,
-   "dropout": 0.1,
-   "hidden_dim": 3072,
-   "id2label": {
-     "0": "NEGATIVE",
-     "1": "POSITIVE"
-   },
-   "initializer_range": 0.02,
-   "label2id": {
-     "NEGATIVE": 0,
-     "POSITIVE": 1
-   },
-   "max_position_embeddings": 512,
-   "model_type": "distilbert",
-   "n_heads": 12,
-   "n_layers": 6,
-   "pad_token_id": 0,
-   "problem_type": "single_label_classification",
-   "qa_dropout": 0.1,
-   "seq_classif_dropout": 0.2,
-   "sinusoidal_pos_embds": false,
-   "tie_weights_": true,
-   "torch_dtype": "float32",
-   "transformers_version": "4.43.0.dev0",
-   "vocab_size": 30522
- }

+ {
+   "_name_or_path": "distilbert-base-uncased",
+   "activation": "gelu",
+   "architectures": [
+     "DistilBertForSequenceClassification"
+   ],
+   "attention_dropout": 0.1,
+   "dim": 768,
+   "dropout": 0.1,
+   "hidden_dim": 3072,
+   "id2label": {
+     "0": "NEGATIVE",
+     "1": "POSITIVE"
+   },
+   "initializer_range": 0.02,
+   "label2id": {
+     "NEGATIVE": 0,
+     "POSITIVE": 1
+   },
+   "max_position_embeddings": 512,
+   "model_type": "distilbert",
+   "n_heads": 12,
+   "n_layers": 6,
+   "pad_token_id": 0,
+   "problem_type": "single_label_classification",
+   "qa_dropout": 0.1,
+   "seq_classif_dropout": 0.2,
+   "sinusoidal_pos_embds": false,
+   "tie_weights_": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.1",
+   "vocab_size": 30522
+ }
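
The updated config keeps the binary sentiment head (`NEGATIVE`/`POSITIVE`, `problem_type: single_label_classification`), so inference looks roughly like the sketch below. The repo id is a hypothetical placeholder; the label name is read from `id2label` in the config shown above rather than hard-coded.

```python
# Minimal inference sketch; "raulgdp/Distilbert" is a hypothetical repo id
# for this checkpoint (substitute the actual Hub id or a local path).
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

model_id = "raulgdp/Distilbert"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSequenceClassification.from_pretrained(model_id)
model.eval()

inputs = tokenizer("I really enjoyed this!", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 2)
probs = logits.softmax(dim=-1)
pred = int(logits.argmax(dim=-1))
print(model.config.id2label[pred], float(probs[0, pred]))
```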
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:02d34fe6b95eeee4e1016197919250718618b2c5c2151da4eb25640d3f2918cb
+ oid sha256:99342a3a6c293b5e8af8e7019316d248e8047c0717ec40fe241bcb7e6c7b68ac
  size 267832560
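
The `model.safetensors` entries are Git LFS pointer files: the repository stores only an `oid` (the SHA-256 of the real weight file) and its size, while the weights live out of band. A downloaded copy can be checked against the pointer like this (a small sketch; it assumes `model.safetensors` sits in the working directory):

```python
# Recompute the SHA-256 of a downloaded model.safetensors and compare it
# with the oid in the LFS pointer above (hashed in chunks to keep memory flat).
import hashlib

h = hashlib.sha256()
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest())
# expected: 99342a3a6c293b5e8af8e7019316d248e8047c0717ec40fe241bcb7e6c7b68ac
```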
runs/Sep29_21-15-50_DESKTOP-P79TL96/events.out.tfevents.1727662555.DESKTOP-P79TL96 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:26fc31b59d4887d7ecb1bfb3cecfbecea88f603d7c5dc803eec32028657ac970
+ size 8604
special_tokens_map.json CHANGED
@@ -1,7 +1,7 @@
- {
-   "cls_token": "[CLS]",
-   "mask_token": "[MASK]",
-   "pad_token": "[PAD]",
-   "sep_token": "[SEP]",
-   "unk_token": "[UNK]"
- }

+ {
+   "cls_token": "[CLS]",
+   "mask_token": "[MASK]",
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "unk_token": "[UNK]"
+ }
tokenizer_config.json CHANGED
@@ -1,55 +1,55 @@
- {
-   "added_tokens_decoder": {
-     "0": {
-       "content": "[PAD]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "100": {
-       "content": "[UNK]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "101": {
-       "content": "[CLS]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "102": {
-       "content": "[SEP]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     },
-     "103": {
-       "content": "[MASK]",
-       "lstrip": false,
-       "normalized": false,
-       "rstrip": false,
-       "single_word": false,
-       "special": true
-     }
-   },
-   "clean_up_tokenization_spaces": true,
-   "cls_token": "[CLS]",
-   "do_lower_case": true,
-   "mask_token": "[MASK]",
-   "model_max_length": 512,
-   "pad_token": "[PAD]",
-   "sep_token": "[SEP]",
-   "strip_accents": null,
-   "tokenize_chinese_chars": true,
-   "tokenizer_class": "DistilBertTokenizer",
-   "unk_token": "[UNK]"
- }

+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "100": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "101": {
+       "content": "[CLS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "102": {
+       "content": "[SEP]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "103": {
+       "content": "[MASK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "clean_up_tokenization_spaces": false,
+   "cls_token": "[CLS]",
+   "do_lower_case": true,
+   "mask_token": "[MASK]",
+   "model_max_length": 512,
+   "pad_token": "[PAD]",
+   "sep_token": "[SEP]",
+   "strip_accents": null,
+   "tokenize_chinese_chars": true,
+   "tokenizer_class": "DistilBertTokenizer",
+   "unk_token": "[UNK]"
+ }
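
The only substantive tokenizer change is `clean_up_tokenization_spaces` flipping from `true` to `false`, which affects only how decoded text is post-processed. The special tokens and lowercasing are stock `distilbert-base-uncased` behavior; a quick sanity check (the printed ids assume the standard BERT uncased vocab, where `[CLS]`=101 and `[SEP]`=102):

```python
# Sanity-check the special tokens and lowercasing configured above.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
enc = tokenizer("Hello WORLD")
print(enc["input_ids"])
# [101, 7592, 2088, 102]  -> [CLS] hello world [SEP]
print(tokenizer.convert_ids_to_tokens(enc["input_ids"]))
```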
training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d42854f96dfc043a77d42f859538cacdbb7e44be5bbfcdf318ec3191c4986a42
- size 4667
+ oid sha256:9a4a78f36e7ea94e6cec1ec4a1c4de31ded521fe18b2c25aa6da049dcd45455d
+ size 5176