ritutweets46 committed 060540c (parent: 665028f): End of training

Files changed:
- README.md (+16 -26)
- logs/events.out.tfevents.1710831582.ccc3f2fe76fd.4084.0 (+2 -2)
- model.safetensors (+1 -1)
- tokenizer.json (+2 -16)
README.md CHANGED

@@ -17,14 +17,14 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [microsoft/layoutlm-base-uncased](https://huggingface.co/microsoft/layoutlm-base-uncased) on the funsd dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.
-- Answer: {'precision': 0.
-- Header: {'precision': 0.
-- Question: {'precision': 0.
-- Overall Precision: 0.
-- Overall Recall: 0.
-- Overall F1: 0.
-- Overall Accuracy: 0.
+- Loss: 1.1882
+- Answer: {'precision': 0.24169381107491858, 'recall': 0.45859085290482077, 'f1': 0.31655290102389083, 'number': 809}
+- Header: {'precision': 0.08955223880597014, 'recall': 0.05042016806722689, 'f1': 0.06451612903225806, 'number': 119}
+- Question: {'precision': 0.35651074589127685, 'recall': 0.5295774647887324, 'f1': 0.4261428031734038, 'number': 1065}
+- Overall Precision: 0.2955
+- Overall Recall: 0.4722
+- Overall F1: 0.3635
+- Overall Accuracy: 0.4847
 
 ## Model description
 

@@ -49,27 +49,17 @@ The following hyperparameters were used during training:
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs:
+- num_epochs: 5
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Answer
-
-| 1.
-| 1.
-| 1.
-| 1.
-| 1.
-| 0.9305 | 6.0 | 60 | 1.1583 | {'precision': 0.3395311236863379, 'recall': 0.519159456118665, 'f1': 0.41055718475073316, 'number': 809} | {'precision': 0.2835820895522388, 'recall': 0.15966386554621848, 'f1': 0.2043010752688172, 'number': 119} | {'precision': 0.4719387755102041, 'recall': 0.5211267605633803, 'f1': 0.49531459170013387, 'number': 1065} | 0.4008 | 0.4987 | 0.4444 | 0.5817 |
-| 0.8843 | 7.0 | 70 | 1.1142 | {'precision': 0.32987551867219916, 'recall': 0.3930778739184178, 'f1': 0.3587140439932318, 'number': 809} | {'precision': 0.25287356321839083, 'recall': 0.18487394957983194, 'f1': 0.21359223300970878, 'number': 119} | {'precision': 0.41626794258373206, 'recall': 0.6535211267605634, 'f1': 0.5085860431128973, 'number': 1065} | 0.3805 | 0.5198 | 0.4394 | 0.5831 |
-| 0.8326 | 8.0 | 80 | 1.0891 | {'precision': 0.33364661654135336, 'recall': 0.4388133498145859, 'f1': 0.3790710090763481, 'number': 809} | {'precision': 0.26582278481012656, 'recall': 0.17647058823529413, 'f1': 0.2121212121212121, 'number': 119} | {'precision': 0.42464040025015637, 'recall': 0.6375586854460094, 'f1': 0.5097597597597597, 'number': 1065} | 0.3848 | 0.5294 | 0.4456 | 0.5943 |
-| 0.7867 | 9.0 | 90 | 1.1168 | {'precision': 0.36489151873767256, 'recall': 0.4573547589616811, 'f1': 0.40592430060340096, 'number': 809} | {'precision': 0.27835051546391754, 'recall': 0.226890756302521, 'f1': 0.25, 'number': 119} | {'precision': 0.4975845410628019, 'recall': 0.5802816901408451, 'f1': 0.5357607282184654, 'number': 1065} | 0.4314 | 0.5093 | 0.4671 | 0.5919 |
-| 0.7846 | 10.0 | 100 | 1.1754 | {'precision': 0.38025415444770283, 'recall': 0.48084054388133496, 'f1': 0.42467248908296945, 'number': 809} | {'precision': 0.3614457831325301, 'recall': 0.25210084033613445, 'f1': 0.297029702970297, 'number': 119} | {'precision': 0.5054945054945055, 'recall': 0.5615023474178403, 'f1': 0.5320284697508897, 'number': 1065} | 0.4443 | 0.5103 | 0.4750 | 0.5923 |
-| 0.711 | 11.0 | 110 | 1.1427 | {'precision': 0.3814968814968815, 'recall': 0.453646477132262, 'f1': 0.41445511010728403, 'number': 809} | {'precision': 0.32967032967032966, 'recall': 0.25210084033613445, 'f1': 0.28571428571428575, 'number': 119} | {'precision': 0.4864667154352597, 'recall': 0.6244131455399061, 'f1': 0.5468750000000001, 'number': 1065} | 0.4388 | 0.5329 | 0.4813 | 0.6085 |
-| 0.7118 | 12.0 | 120 | 1.1172 | {'precision': 0.36363636363636365, 'recall': 0.4796044499381953, 'f1': 0.4136460554371002, 'number': 809} | {'precision': 0.3764705882352941, 'recall': 0.2689075630252101, 'f1': 0.3137254901960785, 'number': 119} | {'precision': 0.47493036211699163, 'recall': 0.64037558685446, 'f1': 0.5453818472610956, 'number': 1065} | 0.4258 | 0.5529 | 0.4811 | 0.6020 |
-| 0.6891 | 13.0 | 130 | 1.1580 | {'precision': 0.3810375670840787, 'recall': 0.5265760197775031, 'f1': 0.44213803840166066, 'number': 809} | {'precision': 0.3146067415730337, 'recall': 0.23529411764705882, 'f1': 0.2692307692307692, 'number': 119} | {'precision': 0.5264527320034692, 'recall': 0.5699530516431925, 'f1': 0.5473399458972048, 'number': 1065} | 0.4496 | 0.5324 | 0.4875 | 0.6035 |
-| 0.6544 | 14.0 | 140 | 1.1198 | {'precision': 0.38986556359875907, 'recall': 0.46600741656365885, 'f1': 0.4245495495495496, 'number': 809} | {'precision': 0.3333333333333333, 'recall': 0.24369747899159663, 'f1': 0.2815533980582524, 'number': 119} | {'precision': 0.48421807747489237, 'recall': 0.6338028169014085, 'f1': 0.5490036600244002, 'number': 1065} | 0.4416 | 0.5424 | 0.4868 | 0.6037 |
-| 0.6515 | 15.0 | 150 | 1.1352 | {'precision': 0.38767395626242546, 'recall': 0.4820766378244747, 'f1': 0.42975206611570255, 'number': 809} | {'precision': 0.3181818181818182, 'recall': 0.23529411764705882, 'f1': 0.27053140096618356, 'number': 119} | {'precision': 0.4954954954954955, 'recall': 0.6197183098591549, 'f1': 0.5506883604505632, 'number': 1065} | 0.4444 | 0.5409 | 0.4879 | 0.6048 |
+| Training Loss | Epoch | Step | Validation Loss | Answer | Header | Question | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy |
+|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:--------:|:-----------------:|:--------------:|:----------:|:----------------:|
+| 1.735 | 1.0 | 10 | 1.5463 | {'precision': 0.034482758620689655, 'recall': 0.014833127317676144, 'f1': 0.02074330164217805, 'number': 809} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 119} | {'precision': 0.3490990990990991, 'recall': 0.14553990610328638, 'f1': 0.20543406229290923, 'number': 1065} | 0.2109 | 0.0838 | 0.1199 | 0.3216 |
+| 1.4649 | 2.0 | 20 | 1.3745 | {'precision': 0.16110761485210826, 'recall': 0.3164400494437577, 'f1': 0.21351125938281904, 'number': 809} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 119} | {'precision': 0.2809198259788689, 'recall': 0.4244131455399061, 'f1': 0.3380703066566941, 'number': 1065} | 0.2213 | 0.3552 | 0.2727 | 0.4138 |
+| 1.3282 | 3.0 | 30 | 1.2410 | {'precision': 0.21228710462287104, 'recall': 0.43139678615574784, 'f1': 0.2845495311863025, 'number': 809} | {'precision': 0.0, 'recall': 0.0, 'f1': 0.0, 'number': 119} | {'precision': 0.31901840490797545, 'recall': 0.48826291079812206, 'f1': 0.38589981447124305, 'number': 1065} | 0.2635 | 0.4360 | 0.3285 | 0.4488 |
+| 1.2317 | 4.0 | 40 | 1.2269 | {'precision': 0.2410941475826972, 'recall': 0.4684796044499382, 'f1': 0.3183536329273415, 'number': 809} | {'precision': 0.07142857142857142, 'recall': 0.03361344537815126, 'f1': 0.045714285714285714, 'number': 119} | {'precision': 0.34568690095846644, 'recall': 0.507981220657277, 'f1': 0.41140684410646383, 'number': 1065} | 0.2894 | 0.4636 | 0.3563 | 0.4598 |
+| 1.1794 | 5.0 | 50 | 1.1882 | {'precision': 0.24169381107491858, 'recall': 0.45859085290482077, 'f1': 0.31655290102389083, 'number': 809} | {'precision': 0.08955223880597014, 'recall': 0.05042016806722689, 'f1': 0.06451612903225806, 'number': 119} | {'precision': 0.35651074589127685, 'recall': 0.5295774647887324, 'f1': 0.4261428031734038, 'number': 1065} | 0.2955 | 0.4722 | 0.3635 | 0.4847 |
 
 
 ### Framework versions
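For context on the hyperparameter hunk above, here is a minimal sketch of the matching transformers TrainingArguments (seed 42, Adam with betas=(0.9, 0.999) and epsilon=1e-08, linear scheduler, now 5 epochs). The output directory is an assumed placeholder, and settings not shown in this hunk (learning rate, batch sizes) are left at library defaults.

```python
from transformers import TrainingArguments

# Values taken from the card's hyperparameter list; output_dir is assumed,
# and anything not listed in the hunk keeps the transformers default.
args = TrainingArguments(
    output_dir="layoutlm-funsd",  # assumed placeholder path
    seed=42,
    num_train_epochs=5,           # changed to 5 in this commit
    lr_scheduler_type="linear",
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
)
print(args.num_train_epochs, args.lr_scheduler_type)
```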
logs/events.out.tfevents.1710831582.ccc3f2fe76fd.4084.0 CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:335877eb3cdeeed0453d45f4a3565a3cee8b17286e823a4af21b9517abfe42a4
+size 8673
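The updated training log is a TensorBoard event file tracked through Git LFS. A short sketch, assuming the file has been pulled locally and the tensorboard package is installed, of listing the scalar summaries it contains:

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

# Path as it appears in this repository; pull it via git lfs before reading.
acc = EventAccumulator("logs/events.out.tfevents.1710831582.ccc3f2fe76fd.4084.0")
acc.Reload()

# Print every logged scalar (e.g. train loss, eval loss) with its step.
for tag in acc.Tags()["scalars"]:
    for event in acc.Scalars(tag):
        print(tag, event.step, event.value)
```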
model.safetensors CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4fcdca9b04cbbe9d94788fa01a35204cef9db0ed816e3c449a3045229d1fc149
 size 450558212
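Both binary files above change only through their Git LFS pointers, which record the new blob's sha256 oid and its size. A small sketch of verifying a locally downloaded model.safetensors against the oid in the pointer; the local path is an assumption.

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file in chunks so large weights do not have to fit in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# oid from the new LFS pointer above.
expected = "4fcdca9b04cbbe9d94788fa01a35204cef9db0ed816e3c449a3045229d1fc149"
print(sha256_of("model.safetensors") == expected)
```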
tokenizer.json CHANGED

@@ -1,21 +1,7 @@
 {
   "version": "1.0",
-  "truncation":
-
-    "max_length": 512,
-    "strategy": "LongestFirst",
-    "stride": 0
-  },
-  "padding": {
-    "strategy": {
-      "Fixed": 512
-    },
-    "direction": "Right",
-    "pad_to_multiple_of": null,
-    "pad_id": 0,
-    "pad_type_id": 0,
-    "pad_token": "[PAD]"
-  },
+  "truncation": null,
+  "padding": null,
   "added_tokens": [
     {
       "id": 0,
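The tokenizer change above drops the serialized truncation and padding blocks, leaving both fields null. A sketch of how those fields map to the tokenizers API, assuming the repository's tokenizer.json is available locally: enabling truncation and padding writes blocks like the removed ones, while disabling them serializes null as in the new version.

```python
from tokenizers import Tokenizer

tok = Tokenizer.from_file("tokenizer.json")  # assumed local copy of this repo's file

# Roughly the old state in the diff: 512-token truncation and fixed [PAD] padding.
tok.enable_truncation(max_length=512, stride=0, strategy="longest_first")
tok.enable_padding(length=512, direction="right", pad_id=0, pad_type_id=0, pad_token="[PAD]")

# The new state: both blocks cleared, so saving writes "truncation": null and
# "padding": null. Saved to a separate file here to avoid overwriting the original.
tok.no_truncation()
tok.no_padding()
tok.save("tokenizer-null-padding.json")
```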