apwic committed on
Commit a8e45a4
1 Parent(s): eb671a5

Model save

README.md ADDED
@@ -0,0 +1,85 @@
+ ---
+ license: mit
+ base_model: indolem/indobert-base-uncased
+ tags:
+ - generated_from_trainer
+ metrics:
+ - precision
+ - recall
+ - f1
+ - accuracy
+ model-index:
+ - name: nerugm-lora-r2a0d0.15
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # nerugm-lora-r2a0d0.15
+
+ This model is a fine-tuned version of [indolem/indobert-base-uncased](https://huggingface.co/indolem/indobert-base-uncased) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.1346
+ - Precision: 0.7342
+ - Recall: 0.8652
+ - F1: 0.7943
+ - Accuracy: 0.9555
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 16
+ - eval_batch_size: 64
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20.0
+
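The hyperparameters listed above can be approximated with `transformers.TrainingArguments`. The snippet below is a hedged reconstruction, not the original training script; in particular, `output_dir` is a hypothetical placeholder.

```python
from transformers import TrainingArguments

# Sketch of the recorded hyperparameters; values mirror the list above.
training_args = TrainingArguments(
    output_dir="nerugm-lora-r2a0d0.15",  # hypothetical output directory
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=64,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=20.0,
)
```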
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |
+ |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
+ | 0.79 | 1.0 | 528 | 0.4638 | 0.3302 | 0.0813 | 0.1305 | 0.8595 |
+ | 0.3919 | 2.0 | 1056 | 0.2519 | 0.5954 | 0.6729 | 0.6318 | 0.9275 |
+ | 0.2386 | 3.0 | 1584 | 0.1927 | 0.6540 | 0.7908 | 0.7159 | 0.9382 |
+ | 0.193 | 4.0 | 2112 | 0.1677 | 0.6826 | 0.8234 | 0.7464 | 0.9448 |
+ | 0.1712 | 5.0 | 2640 | 0.1594 | 0.6959 | 0.8443 | 0.7629 | 0.9476 |
+ | 0.1596 | 6.0 | 3168 | 0.1544 | 0.7082 | 0.8559 | 0.7751 | 0.9498 |
+ | 0.1524 | 7.0 | 3696 | 0.1519 | 0.7012 | 0.8605 | 0.7728 | 0.9506 |
+ | 0.1452 | 8.0 | 4224 | 0.1461 | 0.7203 | 0.8605 | 0.7842 | 0.9522 |
+ | 0.1397 | 9.0 | 4752 | 0.1432 | 0.7263 | 0.8559 | 0.7858 | 0.9535 |
+ | 0.1369 | 10.0 | 5280 | 0.1394 | 0.7258 | 0.8536 | 0.7845 | 0.9539 |
+ | 0.1336 | 11.0 | 5808 | 0.1375 | 0.7321 | 0.8512 | 0.7872 | 0.9543 |
+ | 0.1305 | 12.0 | 6336 | 0.1375 | 0.7345 | 0.8536 | 0.7896 | 0.9547 |
+ | 0.1281 | 13.0 | 6864 | 0.1351 | 0.7330 | 0.8536 | 0.7887 | 0.9547 |
+ | 0.1252 | 14.0 | 7392 | 0.1360 | 0.7342 | 0.8652 | 0.7943 | 0.9553 |
+ | 0.124 | 15.0 | 7920 | 0.1364 | 0.7292 | 0.8559 | 0.7875 | 0.9541 |
+ | 0.1234 | 16.0 | 8448 | 0.1351 | 0.7260 | 0.8605 | 0.7876 | 0.9549 |
+ | 0.1224 | 17.0 | 8976 | 0.1357 | 0.7299 | 0.8652 | 0.7918 | 0.9549 |
+ | 0.1208 | 18.0 | 9504 | 0.1360 | 0.7333 | 0.8675 | 0.7948 | 0.9553 |
+ | 0.1201 | 19.0 | 10032 | 0.1350 | 0.7347 | 0.8675 | 0.7956 | 0.9555 |
+ | 0.1205 | 20.0 | 10560 | 0.1346 | 0.7342 | 0.8652 | 0.7943 | 0.9555 |
+
+
+ ### Framework versions
+
+ - Transformers 4.39.3
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.15.2
nerugm-lora/adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "config": {
+     "alpha": 0,
+     "architecture": "lora",
+     "attn_matrices": [
+       "q",
+       "v"
+     ],
+     "composition_mode": "add",
+     "dropout": 0.15,
+     "init_weights": "lora",
+     "intermediate_lora": false,
+     "leave_out": [],
+     "output_lora": false,
+     "r": 2,
+     "selfattn_lora": true,
+     "use_gating": false
+   },
+   "config_id": "4f0133dd791c95dc",
+   "hidden_size": 768,
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": "nerugm-lora",
+   "version": "0.2.0"
+ }
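The field names in the `config` block above mirror the `adapters` library's `LoRAConfig`. The following is a sketch, not taken from this repo, of the equivalent configuration object; note that `alpha=0` is copied verbatim from the stored config (the model name `r2a0d0.15` encodes rank 2, alpha 0, dropout 0.15).

```python
from adapters import LoRAConfig

# Sketch: recreate the stored LoRA configuration (rank 2, alpha 0, dropout 0.15,
# applied to the query and value projections of self-attention).
lora_config = LoRAConfig(
    r=2,
    alpha=0,
    dropout=0.15,
    attn_matrices=["q", "v"],
    selfattn_lora=True,
    intermediate_lora=False,
    output_lora=False,
    composition_mode="add",
    init_weights="lora",
    use_gating=False,
)
```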
nerugm-lora/head_config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "config": null,
+   "hidden_size": 768,
+   "label2id": {
+     "B-LOCATION": 0,
+     "B-ORGANIZATION": 1,
+     "B-PERSON": 2,
+     "B-QUANTITY": 3,
+     "B-TIME": 4,
+     "I-LOCATION": 5,
+     "I-ORGANIZATION": 6,
+     "I-PERSON": 7,
+     "I-QUANTITY": 8,
+     "I-TIME": 9,
+     "O": 10
+   },
+   "model_class": "BertForTokenClassification",
+   "model_name": "indolem/indobert-base-uncased",
+   "model_type": "bert",
+   "name": null,
+   "num_labels": 11,
+   "version": "0.2.0"
+ }
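The label mapping above can be used to decode token-classification outputs back into BIO tags. The snippet below is a minimal, hedged sketch assuming this repository has been checked out locally (so that `./nerugm-lora` exists) and that the `adapters` library is installed; it is not an official usage example, and the example sentence is invented.

```python
import torch
from transformers import AutoTokenizer, BertForTokenClassification
import adapters

# Label set copied from head_config.json above.
labels = [
    "B-LOCATION", "B-ORGANIZATION", "B-PERSON", "B-QUANTITY", "B-TIME",
    "I-LOCATION", "I-ORGANIZATION", "I-PERSON", "I-QUANTITY", "I-TIME", "O",
]
id2label = {i: l for i, l in enumerate(labels)}

tokenizer = AutoTokenizer.from_pretrained("indolem/indobert-base-uncased")
model = BertForTokenClassification.from_pretrained(
    "indolem/indobert-base-uncased",
    num_labels=len(labels),
    id2label=id2label,
    label2id={l: i for i, l in enumerate(labels)},
)

adapters.init(model)                            # add adapter support to the plain HF model
name = model.load_adapter("./nerugm-lora")      # local path; assumes the repo is cloned
model.set_active_adapters(name)

text = "Budi berkunjung ke Yogyakarta pada hari Senin."  # hypothetical example sentence
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred_ids = logits.argmax(dim=-1)[0].tolist()
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
for token, pred in zip(tokens, pred_ids):
    print(f"{token}\t{id2label[pred]}")
```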
nerugm-lora/pytorch_adapter.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f94724ceaeb03b9ba86b5900cb817daaf570c9950d1336d36eda1669f0d8d6b5
+ size 312614
nerugm-lora/pytorch_model_head.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d248a2f333321797d8e4eb71bb4e0817b3b3100353ea10779fca2d42d4c24473
+ size 35354
runs/May24_19-22-33_indolem-petl-vm/events.out.tfevents.1716578561.indolem-petl-vm.1652764.0 CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d2a198609522119094f2a5e367d2a636301ff84f87b1e265bae05738fd83e593
- size 18202
+ oid sha256:56c626486b08b5223972cb232de26be78bfeabf0131e7a095d4e8d4e97618ebf
+ size 19239