Theoreticallyhugo committed on
Commit
251f723
1 Parent(s): 91030e4

Training in progress, epoch 1

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. README.md +26 -18
  2. meta_data/README_s42_e10.md +90 -0
  3. meta_data/README_s42_e11.md +91 -0
  4. meta_data/README_s42_e12.md +92 -0
  5. meta_data/README_s42_e13.md +93 -0
  6. meta_data/README_s42_e14.md +94 -0
  7. meta_data/README_s42_e15.md +95 -0
  8. meta_data/README_s42_e4.md +16 -17
  9. meta_data/README_s42_e5.md +17 -18
  10. meta_data/README_s42_e6.md +18 -19
  11. meta_data/README_s42_e7.md +17 -17
  12. meta_data/README_s42_e8.md +88 -0
  13. meta_data/README_s42_e9.md +89 -0
  14. meta_data/meta_s42_e10_cvi0.json +1 -0
  15. meta_data/meta_s42_e10_cvi1.json +1 -0
  16. meta_data/meta_s42_e10_cvi2.json +1 -0
  17. meta_data/meta_s42_e10_cvi3.json +1 -0
  18. meta_data/meta_s42_e10_cvi4.json +1 -0
  19. meta_data/meta_s42_e11_cvi0.json +1 -0
  20. meta_data/meta_s42_e11_cvi1.json +1 -0
  21. meta_data/meta_s42_e11_cvi2.json +1 -0
  22. meta_data/meta_s42_e11_cvi3.json +1 -0
  23. meta_data/meta_s42_e11_cvi4.json +1 -0
  24. meta_data/meta_s42_e12_cvi0.json +1 -0
  25. meta_data/meta_s42_e12_cvi1.json +1 -0
  26. meta_data/meta_s42_e12_cvi2.json +1 -0
  27. meta_data/meta_s42_e12_cvi3.json +1 -0
  28. meta_data/meta_s42_e12_cvi4.json +1 -0
  29. meta_data/meta_s42_e13_cvi0.json +1 -0
  30. meta_data/meta_s42_e13_cvi1.json +1 -0
  31. meta_data/meta_s42_e13_cvi2.json +1 -0
  32. meta_data/meta_s42_e13_cvi3.json +1 -0
  33. meta_data/meta_s42_e13_cvi4.json +1 -0
  34. meta_data/meta_s42_e14_cvi0.json +1 -0
  35. meta_data/meta_s42_e14_cvi1.json +1 -0
  36. meta_data/meta_s42_e14_cvi2.json +1 -0
  37. meta_data/meta_s42_e14_cvi3.json +1 -0
  38. meta_data/meta_s42_e14_cvi4.json +1 -0
  39. meta_data/meta_s42_e15_cvi0.json +1 -0
  40. meta_data/meta_s42_e15_cvi1.json +1 -0
  41. meta_data/meta_s42_e15_cvi2.json +1 -0
  42. meta_data/meta_s42_e15_cvi3.json +1 -0
  43. meta_data/meta_s42_e15_cvi4.json +1 -0
  44. meta_data/meta_s42_e16_cvi0.json +1 -0
  45. meta_data/meta_s42_e4_cvi0.json +1 -0
  46. meta_data/meta_s42_e4_cvi1.json +1 -0
  47. meta_data/meta_s42_e4_cvi2.json +1 -0
  48. meta_data/meta_s42_e4_cvi3.json +1 -0
  49. meta_data/meta_s42_e4_cvi4.json +1 -0
  50. meta_data/meta_s42_e5_cvi0.json +1 -0
README.md CHANGED
@@ -17,12 +17,12 @@ model-index:
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
- split: test
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
- value: 0.8970593132847104
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,14 +32,14 @@ should probably proofread and complete it, then remove this comment. -->
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 0.2727
36
- - Claim: {'precision': 0.6323296354992076, 'recall': 0.6568673565380997, 'f1-score': 0.6443649786595916, 'support': 4252.0}
37
- - Majorclaim: {'precision': 0.8782371649250341, 'recall': 0.885884509624198, 'f1-score': 0.8820442619210586, 'support': 2182.0}
38
- - O: {'precision': 1.0, 'recall': 0.9996489072237339, 'f1-score': 0.9998244227899218, 'support': 11393.0}
39
- - Premise: {'precision': 0.9002495840266223, 'recall': 0.8869672131147541, 'f1-score': 0.8935590421139553, 'support': 12200.0}
40
- - Accuracy: 0.8971
41
- - Macro avg: {'precision': 0.852704096112716, 'recall': 0.8573419966251964, 'f1-score': 0.8549481763711319, 'support': 30027.0}
42
- - Weighted avg: {'precision': 0.8985587647495203, 'recall': 0.8970593132847104, 'f1-score': 0.897754701815305, 'support': 30027.0}
43
 
44
  ## Model description
45
 
@@ -64,19 +64,27 @@ The following hyperparameters were used during training:
64
  - seed: 42
65
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
  - lr_scheduler_type: linear
67
- - num_epochs: 7
68
 
69
  ### Training results
70
 
71
  | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
  |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
- | No log | 1.0 | 41 | 0.3545 | {'precision': 0.5265911072362686, 'recall': 0.28410159924741296, 'f1-score': 0.3690803544149099, 'support': 4252.0} | {'precision': 0.5903767014878126, 'recall': 0.8547204399633364, 'f1-score': 0.6983710915558883, 'support': 2182.0} | {'precision': 0.9956778689247596, 'recall': 0.9907838146230141, 'f1-score': 0.9932248130224374, 'support': 11393.0} | {'precision': 0.8420336934350684, 'recall': 0.9136065573770492, 'f1-score': 0.8763612061170736, 'support': 12200.0} | 0.8495 | {'precision': 0.7386698427709772, 'recall': 0.7608031028027031, 'f1-score': 0.7342593662775774, 'support': 30027.0} | {'precision': 0.837374242221422, 'recall': 0.8494688114030706, 'f1-score': 0.8359340726059904, 'support': 30027.0} |
74
- | No log | 2.0 | 82 | 0.2887 | {'precision': 0.5331588132635253, 'recall': 0.5747883349012229, 'f1-score': 0.5531914893617021, 'support': 4252.0} | {'precision': 0.9024745269286754, 'recall': 0.5682859761686526, 'f1-score': 0.6974128233970753, 'support': 2182.0} | {'precision': 0.9994723419224343, 'recall': 0.997542350566137, 'f1-score': 0.9985064136355649, 'support': 11393.0} | {'precision': 0.8662781540400063, 'recall': 0.9016393442622951, 'f1-score': 0.8836051088440838, 'support': 12200.0} | 0.8675 | {'precision': 0.8253459590386604, 'recall': 0.7605640014745769, 'f1-score': 0.7831789588096065, 'support': 30027.0} | {'precision': 0.8722740387839361, 'recall': 0.8675192326905785, 'f1-score': 0.8668828351772135, 'support': 30027.0} |
75
- | No log | 3.0 | 123 | 0.2610 | {'precision': 0.6448462929475588, 'recall': 0.4193320790216369, 'f1-score': 0.5081943850648425, 'support': 4252.0} | {'precision': 0.8409090909090909, 'recall': 0.847846012832264, 'f1-score': 0.8443633044272022, 'support': 2182.0} | {'precision': 0.9999121959785758, 'recall': 0.9995611340296673, 'f1-score': 0.9997366341848828, 'support': 11393.0} | {'precision': 0.8441453960359834, 'recall': 0.9460655737704918, 'f1-score': 0.8922042283461523, 'support': 12200.0} | 0.8846 | {'precision': 0.8324532439678022, 'recall': 0.803201199913515, 'f1-score': 0.81112463800577, 'support': 30027.0} | {'precision': 0.8747901406867009, 'recall': 0.8846371598894328, 'f1-score': 0.8751501753304457, 'support': 30027.0} |
76
- | No log | 4.0 | 164 | 0.2530 | {'precision': 0.6281010374379793, 'recall': 0.6549858889934148, 'f1-score': 0.6412618005986644, 'support': 4252.0} | {'precision': 0.8315485996705108, 'recall': 0.9252978918423465, 'f1-score': 0.8759219088937094, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9996489072237339, 'f1-score': 0.9998244227899218, 'support': 11393.0} | {'precision': 0.9083729619565217, 'recall': 0.8768032786885246, 'f1-score': 0.8923089756423088, 'support': 12200.0} | 0.8955 | {'precision': 0.8420056497662529, 'recall': 0.8641839916870049, 'f1-score': 0.8523292769811512, 'support': 30027.0} | {'precision': 0.8978677454136914, 'recall': 0.8955273587104939, 'f1-score': 0.896362471543389, 'support': 30027.0} |
77
- | No log | 5.0 | 205 | 0.2707 | {'precision': 0.6235240690281563, 'recall': 0.6458137347130762, 'f1-score': 0.6344731977818855, 'support': 4252.0} | {'precision': 0.873358348968105, 'recall': 0.8533455545371219, 'f1-score': 0.8632359758924432, 'support': 2182.0} | {'precision': 1.0, 'recall': 1.0, 'f1-score': 1.0, 'support': 11393.0} | {'precision': 0.8975037196230782, 'recall': 0.89, 'f1-score': 0.8937361099678987, 'support': 12200.0} | 0.8945 | {'precision': 0.848596534404835, 'recall': 0.8472898223125496, 'f1-score': 0.8478613209105568, 'support': 30027.0} | {'precision': 0.8958416637811863, 'recall': 0.8944949545409132, 'f1-score': 0.8951257694066757, 'support': 30027.0} |
78
- | No log | 6.0 | 246 | 0.2700 | {'precision': 0.631960692559663, 'recall': 0.6352304797742239, 'f1-score': 0.6335913675815154, 'support': 4252.0} | {'precision': 0.885956644674835, 'recall': 0.8615948670944088, 'f1-score': 0.8736059479553903, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9995611340296673, 'f1-score': 0.9997805188534304, 'support': 11393.0} | {'precision': 0.8923466470636282, 'recall': 0.8954918032786885, 'f1-score': 0.8939164586998323, 'support': 12200.0} | 0.8957 | {'precision': 0.8525659960745315, 'recall': 0.8479695710442472, 'f1-score': 0.850223573272542, 'support': 30027.0} | {'precision': 0.8958565077303907, 'recall': 0.8956605721517301, 'f1-score': 0.8957444606797332, 'support': 30027.0} |
79
- | No log | 7.0 | 287 | 0.2727 | {'precision': 0.6323296354992076, 'recall': 0.6568673565380997, 'f1-score': 0.6443649786595916, 'support': 4252.0} | {'precision': 0.8782371649250341, 'recall': 0.885884509624198, 'f1-score': 0.8820442619210586, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9996489072237339, 'f1-score': 0.9998244227899218, 'support': 11393.0} | {'precision': 0.9002495840266223, 'recall': 0.8869672131147541, 'f1-score': 0.8935590421139553, 'support': 12200.0} | 0.8971 | {'precision': 0.852704096112716, 'recall': 0.8573419966251964, 'f1-score': 0.8549481763711319, 'support': 30027.0} | {'precision': 0.8985587647495203, 'recall': 0.8970593132847104, 'f1-score': 0.897754701815305, 'support': 30027.0} |
 
 
 
 
 
 
 
 
80
 
81
 
82
  ### Framework versions
 
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
+ split: train[80%:100%]
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
+ value: 0.8996128597879145
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
+ - Loss: 0.4440
36
+ - Claim: {'precision': 0.6588266384778013, 'recall': 0.5981285988483686, 'f1-score': 0.6270120724346077, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.9058606368251039, 'recall': 0.9121747211895911, 'f1-score': 0.9090067145172495, 'support': 2152.0}
38
+ - O: {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8805334618783642, 'recall': 0.9078108175267124, 'f1-score': 0.8939641109298531, 'support': 12073.0}
40
+ - Accuracy: 0.8996
41
+ - Macro avg: {'precision': 0.8613051842953173, 'recall': 0.8544180322695273, 'f1-score': 0.857440461196189, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8967541492974446, 'recall': 0.8996128597879145, 'f1-score': 0.8978925071931304, 'support': 29705.0}
43
 
44
  ## Model description
45
 
 
64
  - seed: 42
65
  - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
  - lr_scheduler_type: linear
67
+ - num_epochs: 15
68
 
69
  ### Training results
70
 
71
  | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
  |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3459 | {'precision': 0.5076716016150741, 'recall': 0.45249520153550865, 'f1-score': 0.4784980337434987, 'support': 4168.0} | {'precision': 0.6691762621789193, 'recall': 0.7021375464684015, 'f1-score': 0.6852607709750567, 'support': 2152.0} | {'precision': 0.9995503597122302, 'recall': 0.98258486562942, 'f1-score': 0.9909950071326675, 'support': 11312.0} | {'precision': 0.8565651760228354, 'recall': 0.8948065932245507, 'f1-score': 0.8752683816082643, 'support': 12073.0} | 0.8522 | {'precision': 0.7582408498822648, 'recall': 0.7580060517144703, 'f1-score': 0.7575055483648718, 'support': 29705.0} | {'precision': 0.8484856957054067, 'recall': 0.8522134320821411, 'f1-score': 0.8499010831719419, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2620 | {'precision': 0.6607431340872375, 'recall': 0.3925143953934741, 'f1-score': 0.49247441300421435, 'support': 4168.0} | {'precision': 0.7220035778175313, 'recall': 0.9377323420074349, 'f1-score': 0.815847988680008, 'support': 2152.0} | {'precision': 0.9998230714791224, 'recall': 0.9991159830268741, 'f1-score': 0.9994694021931375, 'support': 11312.0} | {'precision': 0.8597867479055598, 'recall': 0.9350617079433446, 'f1-score': 0.8958457326508749, 'support': 12073.0} | 0.8835 | {'precision': 0.8105891328223628, 'recall': 0.8161061070927819, 'f1-score': 0.8009093841320587, 'support': 29705.0} | {'precision': 0.8752039412346267, 'recall': 0.8835212927116647, 'f1-score': 0.8729130325852121, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2374 | {'precision': 0.6255685898970553, 'recall': 0.6269193857965452, 'f1-score': 0.6262432594367885, 'support': 4168.0} | {'precision': 0.8762836185819071, 'recall': 0.8327137546468402, 'f1-score': 0.8539432928282107, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9992927864214993, 'f1-score': 0.9996462681287585, 'support': 11312.0} | {'precision': 0.8920272600377699, 'recall': 0.8998591899279383, 'f1-score': 0.8959261091868711, 'support': 12073.0} | 0.8946 | {'precision': 0.848469867129183, 'recall': 0.8396962791982058, 'f1-score': 0.8439397323951572, 'support': 29705.0} | {'precision': 0.894616305009769, 'recall': 0.8945632048476687, 'f1-score': 0.8945424128188674, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2502 | {'precision': 0.6409472880061116, 'recall': 0.6038867562380038, 'f1-score': 0.6218653489808523, 'support': 4168.0} | {'precision': 0.8598290598290599, 'recall': 0.9349442379182156, 'f1-score': 0.8958147818343721, 'support': 2152.0} | {'precision': 0.9998231340643792, 'recall': 0.9994695898161244, 'f1-score': 0.9996463306808134, 'support': 11312.0} | {'precision': 0.8900247320692498, 'recall': 0.8942267870454734, 'f1-score': 0.8921208114696525, 'support': 12073.0} | 0.8965 | {'precision': 0.8476560534922002, 'recall': 0.8581318427544543, 'f1-score': 0.8523618182414225, 'support': 29705.0} | {'precision': 0.8947008354139008, 'recall': 0.8965157380912304, 'f1-score': 0.8954149818075824, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2594 | {'precision': 0.6565992865636148, 'recall': 0.6624280230326296, 'f1-score': 0.6595007763047892, 'support': 4168.0} | {'precision': 0.8777137793531237, 'recall': 0.9205390334572491, 'f1-score': 0.8986164663188932, 'support': 2152.0} | {'precision': 0.9999115670321896, 'recall': 0.999557991513437, 'f1-score': 0.9997347480106101, 'support': 11312.0} | {'precision': 0.9036447423544198, 'recall': 0.8933156630497805, 'f1-score': 0.8984505164945018, 'support': 12073.0} | 0.9033 | {'precision': 0.8594673438258369, 'recall': 0.868960177763274, 'f1-score': 0.8640756267821986, 'support': 29705.0} | {'precision': 0.903761942443296, 'recall': 0.9033496044436964, 'f1-score': 0.9035049461804666, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2753 | {'precision': 0.637114951164538, 'recall': 0.6103646833013435, 'f1-score': 0.6234530082097782, 'support': 4168.0} | {'precision': 0.9366306027820711, 'recall': 0.8447955390334573, 'f1-score': 0.8883459565111166, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8789410348977136, 'recall': 0.9074794997100969, 'f1-score': 0.8929823131469556, 'support': 12073.0} | 0.8963 | {'precision': 0.8631716472110806, 'recall': 0.8405273279652555, 'f1-score': 0.8511290006058785, 'support': 29705.0} | {'precision': 0.8952896579013939, 'recall': 0.8962800875273523, 'f1-score': 0.895480468184721, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.2966 | {'precision': 0.6248019914007694, 'recall': 0.6624280230326296, 'f1-score': 0.643065098404565, 'support': 4168.0} | {'precision': 0.8643994834266036, 'recall': 0.9330855018587361, 'f1-score': 0.8974301675977654, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0} | {'precision': 0.9038098506950404, 'recall': 0.8724426406029985, 'f1-score': 0.8878492856239727, 'support': 12073.0} | 0.8958 | {'precision': 0.8482528313806034, 'recall': 0.8669227401006067, 'f1-score': 0.8570529828736417, 'support': 29705.0} | {'precision': 0.898436583603221, 'recall': 0.8958424507658643, 'f1-score': 0.8968547139279125, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3421 | {'precision': 0.6529668636013357, 'recall': 0.6098848368522073, 'f1-score': 0.6306909812678327, 'support': 4168.0} | {'precision': 0.9046478198370868, 'recall': 0.8773234200743495, 'f1-score': 0.8907761264449163, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9999115983026874, 'f1-score': 0.9999557971975424, 'support': 11312.0} | {'precision': 0.8831158369582729, 'recall': 0.9080593058891742, 'f1-score': 0.8954138930861273, 'support': 12073.0} | 0.8990 | {'precision': 0.8601826300991738, 'recall': 0.8487947902796046, 'f1-score': 0.8542091994991047, 'support': 29705.0} | {'precision': 0.8968936372791452, 'recall': 0.8989732368288167, 'f1-score': 0.8977445596081872, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3315 | {'precision': 0.6249177451195438, 'recall': 0.6835412667946257, 'f1-score': 0.6529162369657385, 'support': 4168.0} | {'precision': 0.9221616261774913, 'recall': 0.8643122676579925, 'f1-score': 0.8923003118253778, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9990275813295615, 'f1-score': 0.9995135541502675, 'support': 11312.0} | {'precision': 0.9006594521474467, 'recall': 0.8823821751014661, 'f1-score': 0.8914271369398771, 'support': 12073.0} | 0.8976 | {'precision': 0.8619347058611204, 'recall': 0.8573158227209113, 'f1-score': 0.8590393099703152, 'support': 29705.0} | {'precision': 0.901357029017618, 'recall': 0.8975929978118162, 'f1-score': 0.8991847263270282, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3580 | {'precision': 0.6654402102496715, 'recall': 0.6074856046065259, 'f1-score': 0.635143609682679, 'support': 4168.0} | {'precision': 0.8756989247311828, 'recall': 0.9460966542750929, 'f1-score': 0.9095376368103641, 'support': 2152.0} | {'precision': 0.9999115592111082, 'recall': 0.9994695898161244, 'f1-score': 0.9996905256642645, 'support': 11312.0} | {'precision': 0.8878382784479948, 'recall': 0.9021784146442475, 'f1-score': 0.8949509058789696, 'support': 12073.0} | 0.9011 | {'precision': 0.8572222431599893, 'recall': 0.8638075658354977, 'f1-score': 0.8598306695090694, 'support': 29705.0} | {'precision': 0.898432249649582, 'recall': 0.9010604275374516, 'f1-score': 0.8994393224226316, 'support': 29705.0} |
83
+ | No log | 11.0 | 451 | 0.3818 | {'precision': 0.6605737496826606, 'recall': 0.6242802303262955, 'f1-score': 0.6419143949673121, 'support': 4168.0} | {'precision': 0.9070716228467816, 'recall': 0.9298327137546468, 'f1-score': 0.9183111519045434, 'support': 2152.0} | {'precision': 0.9999115826702034, 'recall': 0.9997347949080623, 'f1-score': 0.9998231809742728, 'support': 11312.0} | {'precision': 0.8877551020408163, 'recall': 0.9007703139236313, 'f1-score': 0.8942153517247049, 'support': 12073.0} | 0.9018 | {'precision': 0.8638280143101154, 'recall': 0.8636545132281589, 'f1-score': 0.8635660198927083, 'support': 29705.0} | {'precision': 0.8999884427250537, 'recall': 0.901767379229086, 'f1-score': 0.9007765211808001, 'support': 29705.0} |
84
+ | No log | 12.0 | 492 | 0.4293 | {'precision': 0.6597707576181158, 'recall': 0.5662188099808061, 'f1-score': 0.6094254357650096, 'support': 4168.0} | {'precision': 0.9170684667309547, 'recall': 0.8838289962825279, 'f1-score': 0.9001419782300046, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8703326011923439, 'recall': 0.9189927938374887, 'f1-score': 0.8940010475001007, 'support': 12073.0} | 0.8976 | {'precision': 0.8617929563853536, 'recall': 0.8421275474792367, 'f1-score': 0.8508257965126946, 'support': 29705.0} | {'precision': 0.8935526460983838, 'recall': 0.8975929978118162, 'f1-score': 0.8947808316465885, 'support': 29705.0} |
85
+ | 0.1673 | 13.0 | 533 | 0.4672 | {'precision': 0.686539643515673, 'recall': 0.5359884836852208, 'f1-score': 0.6019940716787928, 'support': 4168.0} | {'precision': 0.9140989729225023, 'recall': 0.9098513011152416, 'f1-score': 0.911970190964136, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9993811881188119, 'f1-score': 0.9996904982977407, 'support': 11312.0} | {'precision': 0.8640418332820671, 'recall': 0.9306717468731881, 'f1-score': 0.896119950552299, 'support': 12073.0} | 0.8999 | {'precision': 0.8661701124300606, 'recall': 0.8439731799481155, 'f1-score': 0.852443677873242, 'support': 29705.0} | {'precision': 0.8945367876491145, 'recall': 0.8999495034505975, 'f1-score': 0.8954393610999487, 'support': 29705.0} |
86
+ | 0.1673 | 14.0 | 574 | 0.4210 | {'precision': 0.6521739130434783, 'recall': 0.6477927063339731, 'f1-score': 0.6499759268175253, 'support': 4168.0} | {'precision': 0.9036697247706422, 'recall': 0.9154275092936803, 'f1-score': 0.909510618651893, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8944449043795016, 'recall': 0.8948894226787045, 'f1-score': 0.8946671083140114, 'support': 12073.0} | 0.9015 | {'precision': 0.8625721355484055, 'recall': 0.8643948070306205, 'f1-score': 0.8634720945847734, 'support': 29705.0} | {'precision': 0.9013159888182246, 'recall': 0.9015317286652079, 'f1-score': 0.9014200207764028, 'support': 29705.0} |
87
+ | 0.1673 | 15.0 | 615 | 0.4440 | {'precision': 0.6588266384778013, 'recall': 0.5981285988483686, 'f1-score': 0.6270120724346077, 'support': 4168.0} | {'precision': 0.9058606368251039, 'recall': 0.9121747211895911, 'f1-score': 0.9090067145172495, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0} | {'precision': 0.8805334618783642, 'recall': 0.9078108175267124, 'f1-score': 0.8939641109298531, 'support': 12073.0} | 0.8996 | {'precision': 0.8613051842953173, 'recall': 0.8544180322695273, 'f1-score': 0.857440461196189, 'support': 29705.0} | {'precision': 0.8967541492974446, 'recall': 0.8996128597879145, 'f1-score': 0.8978925071931304, 'support': 29705.0} |
88
 
89
 
90
  ### Framework versions
meta_data/README_s42_e10.md ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8962127587948157
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.3310
36
+ - Claim: {'precision': 0.6261209473442171, 'recall': 0.6533109404990403, 'f1-score': 0.6394270282963485, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.9034416826003824, 'recall': 0.8782527881040892, 'f1-score': 0.8906691800188501, 'support': 2152.0}
38
+ - O: {'precision': 0.9998231340643792, 'recall': 0.9994695898161244, 'f1-score': 0.9996463306808134, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8951990632318502, 'recall': 0.8865236478091609, 'f1-score': 0.8908402347163843, 'support': 12073.0}
40
+ - Accuracy: 0.8962
41
+ - Macro avg: {'precision': 0.8561462068102073, 'recall': 0.8543892415571037, 'f1-score': 0.8551456934280991, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8978830564693184, 'recall': 0.8962127587948157, 'f1-score': 0.8969858736149474, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 10
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3618 | {'precision': 0.459572685379137, 'recall': 0.2631957773512476, 'f1-score': 0.33470633104500386, 'support': 4168.0} | {'precision': 0.7210526315789474, 'recall': 0.5729553903345725, 'f1-score': 0.6385292594510618, 'support': 2152.0} | {'precision': 0.9996423462088698, 'recall': 0.9883309759547383, 'f1-score': 0.9939544807965861, 'support': 11312.0} | {'precision': 0.8006100942872989, 'recall': 0.956514536569204, 'f1-score': 0.8716458466996264, 'support': 12073.0} | 0.8436 | {'precision': 0.7452194393635632, 'recall': 0.6952491700524406, 'f1-score': 0.7097089794980695, 'support': 29705.0} | {'precision': 0.8227882209885014, 'recall': 0.8435616899511866, 'f1-score': 0.8259944234340963, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2635 | {'precision': 0.6204819277108434, 'recall': 0.42010556621880996, 'f1-score': 0.5010014306151646, 'support': 4168.0} | {'precision': 0.7734307824591573, 'recall': 0.8359665427509294, 'f1-score': 0.8034836980794997, 'support': 2152.0} | {'precision': 0.9996458923512748, 'recall': 0.9982319660537482, 'f1-score': 0.9989384288747346, 'support': 11312.0} | {'precision': 0.8523489932885906, 'recall': 0.9362213203014992, 'f1-score': 0.8923186231941264, 'support': 12073.0} | 0.8802 | {'precision': 0.8114768989524666, 'recall': 0.7976313488312466, 'f1-score': 0.7989355451908814, 'support': 29705.0} | {'precision': 0.8701900504562087, 'recall': 0.8801548560848342, 'f1-score': 0.8715780214214702, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2378 | {'precision': 0.6241271370093908, 'recall': 0.6218809980806143, 'f1-score': 0.623002043023675, 'support': 4168.0} | {'precision': 0.8295557570262919, 'recall': 0.8503717472118959, 'f1-score': 0.8398347865993575, 'support': 2152.0} | {'precision': 0.9996462681287585, 'recall': 0.9992927864214993, 'f1-score': 0.9994694960212203, 'support': 11312.0} | {'precision': 0.8973251370659578, 'recall': 0.8947237637703968, 'f1-score': 0.8960225623159553, 'support': 12073.0} | 0.8930 | {'precision': 0.8376635748075998, 'recall': 0.8415673238711016, 'f1-score': 0.8395822219900521, 'support': 29705.0} | {'precision': 0.8930473274211594, 'recall': 0.893048308365595, 'f1-score': 0.8930374115440858, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2418 | {'precision': 0.6509234828496042, 'recall': 0.591890595009597, 'f1-score': 0.6200050263885398, 'support': 4168.0} | {'precision': 0.8957732949087416, 'recall': 0.866635687732342, 'f1-score': 0.8809636277751535, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9993811881188119, 'f1-score': 0.9996904982977407, 'support': 11312.0} | {'precision': 0.8806673052362708, 'recall': 0.913857367679947, 'f1-score': 0.896955408316735, 'support': 12073.0} | 0.8978 | {'precision': 0.8568410207486541, 'recall': 0.8429412096351745, 'f1-score': 0.8494036401945422, 'support': 29705.0} | {'precision': 0.8949688464325286, 'recall': 0.8978286483756943, 'f1-score': 0.8960597959370997, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2518 | {'precision': 0.633635729239358, 'recall': 0.6535508637236085, 'f1-score': 0.6434392346758001, 'support': 4168.0} | {'precision': 0.8811835413777162, 'recall': 0.8856877323420075, 'f1-score': 0.8834298957126304, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8996397754879786, 'recall': 0.8895055081587012, 'f1-score': 0.8945439400249896, 'support': 12073.0} | 0.8980 | {'precision': 0.8536147615262633, 'recall': 0.8570534235101104, 'f1-score': 0.855286948742271, 'support': 29705.0} | {'precision': 0.8991971624299228, 'recall': 0.8979969702070358, 'f1-score': 0.8985633414508561, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2668 | {'precision': 0.6150532759011562, 'recall': 0.650911708253359, 'f1-score': 0.6324746473948013, 'support': 4168.0} | {'precision': 0.9063266307013241, 'recall': 0.8587360594795539, 'f1-score': 0.8818897637795274, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9992043847241867, 'f1-score': 0.9996020340481981, 'support': 11312.0} | {'precision': 0.8931559571619813, 'recall': 0.8842044230928519, 'f1-score': 0.8886576482830385, 'support': 12073.0} | 0.8934 | {'precision': 0.8536339659411154, 'recall': 0.8482641438874878, 'f1-score': 0.8506560233763915, 'support': 29705.0} | {'precision': 0.8957760927130742, 'recall': 0.8934186163945463, 'f1-score': 0.8944703955182431, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.3051 | {'precision': 0.6341149188353518, 'recall': 0.5904510556621881, 'f1-score': 0.6115045347248106, 'support': 4168.0} | {'precision': 0.9206932773109243, 'recall': 0.8145910780669146, 'f1-score': 0.8643984220907298, 'support': 2152.0} | {'precision': 0.9998231497037757, 'recall': 0.999557991513437, 'f1-score': 0.9996905530259493, 'support': 11312.0} | {'precision': 0.875901990325906, 'recall': 0.9149341505839477, 'f1-score': 0.8949927078269324, 'support': 12073.0} | 0.8944 | {'precision': 0.8576333340439894, 'recall': 0.8298835689566219, 'f1-score': 0.8426465544171056, 'support': 29705.0} | {'precision': 0.8924116180149008, 'recall': 0.8943612186500589, 'f1-score': 0.8928693082813114, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3102 | {'precision': 0.6281418839558375, 'recall': 0.6415547024952015, 'f1-score': 0.6347774480712166, 'support': 4168.0} | {'precision': 0.9114481409001957, 'recall': 0.8657063197026023, 'f1-score': 0.8879885605338417, 'support': 2152.0} | {'precision': 0.9998231340643792, 'recall': 0.9994695898161244, 'f1-score': 0.9996463306808134, 'support': 11312.0} | {'precision': 0.890625, 'recall': 0.8923217095999337, 'f1-score': 0.8914725474781745, 'support': 12073.0} | 0.8960 | {'precision': 0.8575095397301031, 'recall': 0.8497630804034655, 'f1-score': 0.8534712216910115, 'support': 29705.0} | {'precision': 0.8968876178785191, 'recall': 0.8960107725972059, 'f1-score': 0.8963962681095783, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3262 | {'precision': 0.6178665496049166, 'recall': 0.675383877159309, 'f1-score': 0.645346171480972, 'support': 4168.0} | {'precision': 0.9207772795216741, 'recall': 0.8587360594795539, 'f1-score': 0.8886751622986293, 'support': 2152.0} | {'precision': 0.9998231340643792, 'recall': 0.9994695898161244, 'f1-score': 0.9996463306808134, 'support': 11312.0} | {'precision': 0.898259252999831, 'recall': 0.8804770976559264, 'f1-score': 0.8892792905843476, 'support': 12073.0} | 0.8954 | {'precision': 0.8591815540477001, 'recall': 0.8535166560277284, 'f1-score': 0.8557367387611907, 'support': 29705.0} | {'precision': 0.8992244988482462, 'recall': 0.8954384783706447, 'f1-score': 0.897037534417961, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3310 | {'precision': 0.6261209473442171, 'recall': 0.6533109404990403, 'f1-score': 0.6394270282963485, 'support': 4168.0} | {'precision': 0.9034416826003824, 'recall': 0.8782527881040892, 'f1-score': 0.8906691800188501, 'support': 2152.0} | {'precision': 0.9998231340643792, 'recall': 0.9994695898161244, 'f1-score': 0.9996463306808134, 'support': 11312.0} | {'precision': 0.8951990632318502, 'recall': 0.8865236478091609, 'f1-score': 0.8908402347163843, 'support': 12073.0} | 0.8962 | {'precision': 0.8561462068102073, 'recall': 0.8543892415571037, 'f1-score': 0.8551456934280991, 'support': 29705.0} | {'precision': 0.8978830564693184, 'recall': 0.8962127587948157, 'f1-score': 0.8969858736149474, 'support': 29705.0} |
83
+
84
+
85
+ ### Framework versions
86
+
87
+ - Transformers 4.37.2
88
+ - Pytorch 2.2.0+cu121
89
+ - Datasets 2.17.0
90
+ - Tokenizers 0.15.2
meta_data/README_s42_e11.md ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8955394714694496
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.3410
36
+ - Claim: {'precision': 0.6256446319737459, 'recall': 0.6403550863723608, 'f1-score': 0.6329143941190419, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.9062196307094267, 'recall': 0.866635687732342, 'f1-score': 0.8859857482185273, 'support': 2152.0}
38
+ - O: {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8913007456503729, 'recall': 0.8910792677876253, 'f1-score': 0.8911899929586214, 'support': 12073.0}
40
+ - Accuracy: 0.8955
41
+ - Macro avg: {'precision': 0.8557691497051705, 'recall': 0.8494733096244258, 'f1-score': 0.8524893817222043, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8964667660387375, 'recall': 0.8955394714694496, 'f1-score': 0.8959591059935927, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 11
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.4047 | {'precision': 0.4120455380095483, 'recall': 0.26919385796545103, 'f1-score': 0.325642141924249, 'support': 4168.0} | {'precision': 0.7177489177489178, 'recall': 0.38522304832713755, 'f1-score': 0.5013607499244028, 'support': 2152.0} | {'precision': 0.9997273223050355, 'recall': 0.9723302687411598, 'f1-score': 0.9858384870484898, 'support': 11312.0} | {'precision': 0.7869139966273188, 'recall': 0.9662884121593639, 'f1-score': 0.8674250873670905, 'support': 12073.0} | 0.8287 | {'precision': 0.7291089436727051, 'recall': 0.6482588967982781, 'f1-score': 0.670066616566058, 'support': 29705.0} | {'precision': 0.8103460570481619, 'recall': 0.8286820400605959, 'f1-score': 0.8099792232503951, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2642 | {'precision': 0.636955636955637, 'recall': 0.3754798464491363, 'f1-score': 0.47245283018867923, 'support': 4168.0} | {'precision': 0.7393258426966293, 'recall': 0.9172862453531598, 'f1-score': 0.8187474077146413, 'support': 2152.0} | {'precision': 0.9999115200849407, 'recall': 0.9990275813295615, 'f1-score': 0.9994693552666489, 'support': 11312.0} | {'precision': 0.8513859596263935, 'recall': 0.9362213203014992, 'f1-score': 0.8917906031796126, 'support': 12073.0} | 0.8801 | {'precision': 0.8068947398409001, 'recall': 0.8070037483583392, 'f1-score': 0.7956150490873954, 'support': 29705.0} | {'precision': 0.8697405189053876, 'recall': 0.8800875273522976, 'f1-score': 0.8686656494392229, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2391 | {'precision': 0.6173835125448028, 'recall': 0.6612284069097889, 'f1-score': 0.6385542168674699, 'support': 4168.0} | {'precision': 0.8279618701770313, 'recall': 0.8475836431226765, 'f1-score': 0.8376578645235362, 'support': 2152.0} | {'precision': 0.9999115122555526, 'recall': 0.998939179632249, 'f1-score': 0.9994251094503163, 'support': 11312.0} | {'precision': 0.9087501065008095, 'recall': 0.8834589580054667, 'f1-score': 0.8959260814783705, 'support': 12073.0} | 0.8937 | {'precision': 0.8385017503695491, 'recall': 0.8478025469175453, 'f1-score': 0.8428908180799233, 'support': 29705.0} | {'precision': 0.8967300955168085, 'recall': 0.8936542669584245, 'f1-score': 0.8950057606513586, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2481 | {'precision': 0.6269047619047619, 'recall': 0.6317178502879078, 'f1-score': 0.6293021032504779, 'support': 4168.0} | {'precision': 0.8501953973078593, 'recall': 0.9098513011152416, 'f1-score': 0.8790123456790123, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8994028093195391, 'recall': 0.8856953532676219, 'f1-score': 0.8924964527168016, 'support': 12073.0} | 0.8953 | {'precision': 0.8441036436622543, 'recall': 0.8568161261676929, 'f1-score': 0.8501916756878161, 'support': 29705.0} | {'precision': 0.8958777898648119, 'recall': 0.8953374852718398, 'f1-score': 0.8955117128429093, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2598 | {'precision': 0.6341782074732166, 'recall': 0.5822936660268714, 'f1-score': 0.6071294559099436, 'support': 4168.0} | {'precision': 0.8946587537091988, 'recall': 0.8406133828996283, 'f1-score': 0.8667944417824629, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9992927864214993, 'f1-score': 0.9996462681287585, 'support': 11312.0} | {'precision': 0.8773103887826641, 'recall': 0.9121179491427152, 'f1-score': 0.8943756345177665, 'support': 12073.0} | 0.8939 | {'precision': 0.85153683749127, 'recall': 0.8335794461226784, 'f1-score': 0.8419864500847328, 'support': 29705.0} | {'precision': 0.8911741703586489, 'recall': 0.8938562531560343, 'f1-score': 0.8921613476368966, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2705 | {'precision': 0.5968630775752437, 'recall': 0.6756238003838771, 'f1-score': 0.6338059869457574, 'support': 4168.0} | {'precision': 0.897196261682243, 'recall': 0.8475836431226765, 'f1-score': 0.87168458781362, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.9010130494505495, 'recall': 0.8692951213451503, 'f1-score': 0.8848699464609417, 'support': 12073.0} | 0.8901 | {'precision': 0.8487680971770091, 'recall': 0.8479930386669572, 'f1-score': 0.8475238114439957, 'support': 29705.0} | {'precision': 0.895755671048318, 'recall': 0.8901195085002525, 'f1-score': 0.892428973383654, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.3331 | {'precision': 0.6148930258405112, 'recall': 0.5309500959692899, 'f1-score': 0.5698467876915154, 'support': 4168.0} | {'precision': 0.9280045351473923, 'recall': 0.7606877323420075, 'f1-score': 0.8360572012257406, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8592586908142122, 'recall': 0.9274413981611861, 'f1-score': 0.8920490758444869, 'support': 12073.0} | 0.8873 | {'precision': 0.8505169605723131, 'recall': 0.8047256057694646, 'f1-score': 0.8244551140885924, 'support': 29705.0} | {'precision': 0.8835135491375496, 'recall': 0.8872917017337149, 'f1-score': 0.8838419435954323, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3164 | {'precision': 0.601327525409666, 'recall': 0.6955374280230326, 'f1-score': 0.6450105684725775, 'support': 4168.0} | {'precision': 0.8873751135331517, 'recall': 0.9079925650557621, 'f1-score': 0.8975654570509877, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0} | {'precision': 0.9086593406593406, 'recall': 0.8561252381346807, 'f1-score': 0.881610371886728, 'support': 12073.0} | 0.8920 | {'precision': 0.8493404949005396, 'recall': 0.8648033056817281, 'f1-score': 0.8559913360783348, 'support': 29705.0} | {'precision': 0.8987782726817388, 'recall': 0.8919710486450093, 'f1-score': 0.894568132641749, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3299 | {'precision': 0.6149536737772032, 'recall': 0.6847408829174664, 'f1-score': 0.6479736632988989, 'support': 4168.0} | {'precision': 0.9139676113360324, 'recall': 0.8392193308550185, 'f1-score': 0.8750000000000001, 'support': 2152.0} | {'precision': 0.9999115748518879, 'recall': 0.9996463932107497, 'f1-score': 0.9997789664471067, 'support': 11312.0} | {'precision': 0.9021988284234654, 'recall': 0.8802286092934648, 'f1-score': 0.8910783162837498, 'support': 12073.0} | 0.8953 | {'precision': 0.8577579220971472, 'recall': 0.8509588040691748, 'f1-score': 0.8534577365074388, 'support': 29705.0} | {'precision': 0.8999572934583262, 'recall': 0.8953038209055715, 'f1-score': 0.8971971859812554, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3392 | {'precision': 0.6190580985915493, 'recall': 0.6749040307101728, 'f1-score': 0.6457759412304866, 'support': 4168.0} | {'precision': 0.889295516925892, 'recall': 0.9033457249070632, 'f1-score': 0.8962655601659751, 'support': 2152.0} | {'precision': 0.9999115826702034, 'recall': 0.9997347949080623, 'f1-score': 0.9998231809742728, 'support': 11312.0} | {'precision': 0.9038148306900986, 'recall': 0.8732709351445374, 'f1-score': 0.8882803943044907, 'support': 12073.0} | 0.8958 | {'precision': 0.8530200072194358, 'recall': 0.8628138714174589, 'f1-score': 0.8575362691688063, 'support': 29705.0} | {'precision': 0.8994026049971722, 'recall': 0.8957751220333278, 'f1-score': 0.8973090938274679, 'support': 29705.0} |
83
+ | No log | 11.0 | 451 | 0.3410 | {'precision': 0.6256446319737459, 'recall': 0.6403550863723608, 'f1-score': 0.6329143941190419, 'support': 4168.0} | {'precision': 0.9062196307094267, 'recall': 0.866635687732342, 'f1-score': 0.8859857482185273, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8913007456503729, 'recall': 0.8910792677876253, 'f1-score': 0.8911899929586214, 'support': 12073.0} | 0.8955 | {'precision': 0.8557691497051705, 'recall': 0.8494733096244258, 'f1-score': 0.8524893817222043, 'support': 29705.0} | {'precision': 0.8964667660387375, 'recall': 0.8955394714694496, 'f1-score': 0.8959591059935927, 'support': 29705.0} |
84
+
85
+
86
+ ### Framework versions
87
+
88
+ - Transformers 4.37.2
89
+ - Pytorch 2.2.0+cu121
90
+ - Datasets 2.17.0
91
+ - Tokenizers 0.15.2
meta_data/README_s42_e12.md ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8943612186500589
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.3879
36
+ - Claim: {'precision': 0.6307230422817113, 'recall': 0.6048464491362764, 'f1-score': 0.6175137783221066, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.8988711194731891, 'recall': 0.8880111524163569, 'f1-score': 0.8934081346423562, 'support': 2152.0}
38
+ - O: {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8821419838617655, 'recall': 0.8964631823076286, 'f1-score': 0.8892449264645469, 'support': 12073.0}
40
+ - Accuracy: 0.8944
41
+ - Macro avg: {'precision': 0.8529119379333807, 'recall': 0.8473301959650654, 'f1-score': 0.8500306601334956, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.892924576633343, 'recall': 0.8943612186500589, 'f1-score': 0.8935790524525439, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 12
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3346 | {'precision': 0.5123664980326026, 'recall': 0.43738003838771594, 'f1-score': 0.47191302096815946, 'support': 4168.0} | {'precision': 0.7102300538423887, 'recall': 0.6742565055762082, 'f1-score': 0.6917759237187128, 'support': 2152.0} | {'precision': 0.9996437160416852, 'recall': 0.9921322489391796, 'f1-score': 0.9958738187142286, 'support': 11312.0} | {'precision': 0.8501203696513163, 'recall': 0.9067340346227118, 'f1-score': 0.8775150300601202, 'support': 12073.0} | 0.8566 | {'precision': 0.7680901593919982, 'recall': 0.7526257068814539, 'f1-score': 0.7592694483653053, 'support': 29705.0} | {'precision': 0.8495348115917386, 'recall': 0.8565561353307524, 'f1-score': 0.8522201263911512, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2693 | {'precision': 0.6444533120510774, 'recall': 0.3874760076775432, 'f1-score': 0.4839676356008391, 'support': 4168.0} | {'precision': 0.7956173619890434, 'recall': 0.8773234200743495, 'f1-score': 0.834475138121547, 'support': 2152.0} | {'precision': 0.9998227107525929, 'recall': 0.9970827439886846, 'f1-score': 0.9984508476076661, 'support': 11312.0} | {'precision': 0.8414913252122554, 'recall': 0.9440901184461195, 'f1-score': 0.8898430790850183, 'support': 12073.0} | 0.8813 | {'precision': 0.8203461775012423, 'recall': 0.8014930725466742, 'f1-score': 0.8016841751037678, 'support': 29705.0} | {'precision': 0.8708153253980879, 'recall': 0.8813331089042249, 'f1-score': 0.8702413426814749, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2430 | {'precision': 0.6065792398310735, 'recall': 0.6547504798464492, 'f1-score': 0.6297450098073151, 'support': 4168.0} | {'precision': 0.8615457562825984, 'recall': 0.8443308550185874, 'f1-score': 0.8528514433231636, 'support': 2152.0} | {'precision': 0.9999115435647944, 'recall': 0.9992927864214993, 'f1-score': 0.9996020692399523, 'support': 11312.0} | {'precision': 0.9014586160108549, 'recall': 0.8804770976559264, 'f1-score': 0.8908443327047978, 'support': 12073.0} | 0.8914 | {'precision': 0.8423737889223304, 'recall': 0.8447128047356155, 'f1-score': 0.8432607137688072, 'support': 29705.0} | {'precision': 0.8946836556485465, 'recall': 0.8914324187847164, 'f1-score': 0.8928724370609561, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2497 | {'precision': 0.6372872745745491, 'recall': 0.6019673704414588, 'f1-score': 0.6191239975323873, 'support': 4168.0} | {'precision': 0.8602197802197802, 'recall': 0.9093866171003717, 'f1-score': 0.8841201716738197, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8892446633825944, 'recall': 0.8971258179408598, 'f1-score': 0.8931678555230281, 'support': 12073.0} | 0.8958 | {'precision': 0.8466658310734451, 'recall': 0.8521199513706725, 'f1-score': 0.8490919564585518, 'support': 29705.0} | {'precision': 0.8939322416048354, 'recall': 0.8957751220333278, 'f1-score': 0.8947265097790277, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2560 | {'precision': 0.6361584754262788, 'recall': 0.6086852207293666, 'f1-score': 0.6221186856302108, 'support': 4168.0} | {'precision': 0.889348025711662, 'recall': 0.900092936802974, 'f1-score': 0.894688221709007, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0} | {'precision': 0.8856278613472858, 'recall': 0.8972914768491675, 'f1-score': 0.8914215182061304, 'support': 12073.0} | 0.8959 | {'precision': 0.8527835906213067, 'recall': 0.8514069064737362, 'f1-score': 0.8520018431120986, 'support': 29705.0} | {'precision': 0.8944477578506652, 'recall': 0.8959434438646693, 'f1-score': 0.895135201868183, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2836 | {'precision': 0.6055871212121212, 'recall': 0.6137236084452975, 'f1-score': 0.6096282173498571, 'support': 4168.0} | {'precision': 0.9181011997913406, 'recall': 0.8178438661710037, 'f1-score': 0.8650774145981813, 'support': 2152.0} | {'precision': 0.9999115748518879, 'recall': 0.9996463932107497, 'f1-score': 0.9997789664471067, 'support': 11312.0} | {'precision': 0.8807833537331702, 'recall': 0.8940611281371655, 'f1-score': 0.8873725748109175, 'support': 12073.0} | 0.8894 | {'precision': 0.85109581239713, 'recall': 0.8313187489910541, 'f1-score': 0.8404642933015157, 'support': 29705.0} | {'precision': 0.8902386153007307, 'recall': 0.8894125568086181, 'f1-score': 0.8895918454896943, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.3062 | {'precision': 0.5987467588591184, 'recall': 0.664827255278311, 'f1-score': 0.6300591177808095, 'support': 4168.0} | {'precision': 0.9013605442176871, 'recall': 0.8619888475836431, 'f1-score': 0.8812351543942992, 'support': 2152.0} | {'precision': 1.0, 'recall': 1.0, 'f1-score': 1.0, 'support': 11312.0} | {'precision': 0.8968992910224652, 'recall': 0.8697092686159198, 'f1-score': 0.8830950378469301, 'support': 12073.0} | 0.8900 | {'precision': 0.8492516485248176, 'recall': 0.8491313428694685, 'f1-score': 0.8485973275055096, 'support': 29705.0} | {'precision': 0.8946497061974581, 'recall': 0.8900185154014476, 'f1-score': 0.8919747802421455, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3382 | {'precision': 0.6177335444469577, 'recall': 0.6552303262955854, 'f1-score': 0.6359296774944697, 'support': 4168.0} | {'precision': 0.8736559139784946, 'recall': 0.9061338289962825, 'f1-score': 0.8895985401459854, 'support': 2152.0} | {'precision': 0.9999115983026874, 'recall': 0.9999115983026874, 'f1-score': 0.9999115983026874, 'support': 11312.0} | {'precision': 0.9003407155025553, 'recall': 0.8755073304066926, 'f1-score': 0.8877503884432872, 'support': 12073.0} | 0.8942 | {'precision': 0.8479104430576737, 'recall': 0.859195771000312, 'f1-score': 0.8532975510966074, 'support': 29705.0} | {'precision': 0.8966717521763674, 'recall': 0.8941928968187174, 'f1-score': 0.8952627973023706, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3559 | {'precision': 0.6090930396062808, 'recall': 0.6235604606525912, 'f1-score': 0.6162418494368701, 'support': 4168.0} | {'precision': 0.9208074534161491, 'recall': 0.8266728624535316, 'f1-score': 0.8712047012732617, 'support': 2152.0} | {'precision': 0.9999115983026874, 'recall': 0.9999115983026874, 'f1-score': 0.9999115983026874, 'support': 11312.0} | {'precision': 0.883467278989667, 'recall': 0.8923217095999337, 'f1-score': 0.8878724193348992, 'support': 12073.0} | 0.8908 | {'precision': 0.8533198425786961, 'recall': 0.835616657752186, 'f1-score': 0.8438076420869296, 'support': 29705.0} | {'precision': 0.8920174343737682, 'recall': 0.8908264601918869, 'f1-score': 0.8912173797079, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3689 | {'precision': 0.6432050657574282, 'recall': 0.633637236084453, 'f1-score': 0.6383853033599227, 'support': 4168.0} | {'precision': 0.8838137472283814, 'recall': 0.9261152416356877, 'f1-score': 0.9044701611073291, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8931094672097083, 'recall': 0.8900024848836247, 'f1-score': 0.8915532691669432, 'support': 12073.0} | 0.8985 | {'precision': 0.8550099715780937, 'recall': 0.8624387406509414, 'f1-score': 0.8585911336847918, 'support': 29705.0} | {'precision': 0.8980426387520326, 'recall': 0.8985356000673287, 'f1-score': 0.8982471762955423, 'support': 29705.0} |
83
+ | No log | 11.0 | 451 | 0.3769 | {'precision': 0.6255558155862392, 'recall': 0.6413147792706334, 'f1-score': 0.6333372823125222, 'support': 4168.0} | {'precision': 0.8893424036281179, 'recall': 0.9112453531598513, 'f1-score': 0.9001606610052789, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8938223938223938, 'recall': 0.8820508572848504, 'f1-score': 0.88789761120607, 'support': 12073.0} | 0.8953 | {'precision': 0.8521580547884019, 'recall': 0.8586527474288339, 'f1-score': 0.8553378389072108, 'support': 29705.0} | {'precision': 0.896256500285568, 'recall': 0.8953038209055715, 'f1-score': 0.8957408994227329, 'support': 29705.0} |
84
+ | No log | 12.0 | 492 | 0.3879 | {'precision': 0.6307230422817113, 'recall': 0.6048464491362764, 'f1-score': 0.6175137783221066, 'support': 4168.0} | {'precision': 0.8988711194731891, 'recall': 0.8880111524163569, 'f1-score': 0.8934081346423562, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8821419838617655, 'recall': 0.8964631823076286, 'f1-score': 0.8892449264645469, 'support': 12073.0} | 0.8944 | {'precision': 0.8529119379333807, 'recall': 0.8473301959650654, 'f1-score': 0.8500306601334956, 'support': 29705.0} | {'precision': 0.892924576633343, 'recall': 0.8943612186500589, 'f1-score': 0.8935790524525439, 'support': 29705.0} |
85
+
86
+
87
+ ### Framework versions
88
+
89
+ - Transformers 4.37.2
90
+ - Pytorch 2.2.0+cu121
91
+ - Datasets 2.17.0
92
+ - Tokenizers 0.15.2
meta_data/README_s42_e13.md ADDED
@@ -0,0 +1,93 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8962800875273523
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.4178
36
+ - Claim: {'precision': 0.6449313621964097, 'recall': 0.5861324376199616, 'f1-score': 0.6141277023629965, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.920619554695063, 'recall': 0.8838289962825279, 'f1-score': 0.9018492176386913, 'support': 2152.0}
38
+ - O: {'precision': 1.0, 'recall': 0.9996463932107497, 'f1-score': 0.9998231653404068, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8746711313082994, 'recall': 0.9087219415224054, 'f1-score': 0.8913714657133571, 'support': 12073.0}
40
+ - Accuracy: 0.8963
41
+ - Macro avg: {'precision': 0.860055512049943, 'recall': 0.8445824421589111, 'f1-score': 0.851792887763863, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8934910542879485, 'recall': 0.8962800875273523, 'f1-score': 0.8945292419355487, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 13
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3959 | {'precision': 0.40099457504520797, 'recall': 0.21281190019193857, 'f1-score': 0.2780564263322884, 'support': 4168.0} | {'precision': 0.7553648068669528, 'recall': 0.40892193308550184, 'f1-score': 0.5305999397045523, 'support': 2152.0} | {'precision': 0.9995538900785154, 'recall': 0.9903642149929278, 'f1-score': 0.9949378330373002, 'support': 11312.0} | {'precision': 0.7791005291005291, 'recall': 0.9757309699329081, 'f1-score': 0.8663994410326187, 'support': 12073.0} | 0.8332 | {'precision': 0.7337534502728014, 'recall': 0.6469572545508191, 'f1-score': 0.6674984100266899, 'support': 29705.0} | {'precision': 0.8082789007091385, 'recall': 0.8331930651405487, 'f1-score': 0.8084688595893593, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2797 | {'precision': 0.656575682382134, 'recall': 0.31741842610364684, 'f1-score': 0.42794759825327516, 'support': 4168.0} | {'precision': 0.7055785123966942, 'recall': 0.9521375464684015, 'f1-score': 0.8105221518987342, 'support': 2152.0} | {'precision': 0.9998224274172068, 'recall': 0.995491513437058, 'f1-score': 0.9976522702104098, 'support': 11312.0} | {'precision': 0.8425645197071656, 'recall': 0.9437588006295039, 'f1-score': 0.890295358649789, 'support': 12073.0} | 0.8762 | {'precision': 0.8011352854758002, 'recall': 0.8022015716596526, 'f1-score': 0.7816043447530521, 'support': 29705.0} | {'precision': 0.8664293939812986, 'recall': 0.8761824608651743, 'f1-score': 0.8605254201651165, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2480 | {'precision': 0.6007310255858955, 'recall': 0.6703454894433781, 'f1-score': 0.633631931057943, 'support': 4168.0} | {'precision': 0.8402394106813996, 'recall': 0.8480483271375465, 'f1-score': 0.8441258094357077, 'support': 2152.0} | {'precision': 0.9988508795191373, 'recall': 0.998939179632249, 'f1-score': 0.9988950276243095, 'support': 11312.0} | {'precision': 0.9099317140634454, 'recall': 0.871945663878075, 'f1-score': 0.8905337957871585, 'support': 12073.0} | 0.8903 | {'precision': 0.8374382574624696, 'recall': 0.8473196650228122, 'f1-score': 0.8417966409762797, 'support': 29705.0} | {'precision': 0.8953593287135783, 'recall': 0.890287830331594, 'f1-score': 0.8923902272203232, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2510 | {'precision': 0.6090909090909091, 'recall': 0.6590690978886756, 'f1-score': 0.6330951832219405, 'support': 4168.0} | {'precision': 0.8471655328798186, 'recall': 0.8680297397769516, 'f1-score': 0.8574707367454671, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.9060546373212298, 'recall': 0.8763356249482316, 'f1-score': 0.8909473684210527, 'support': 12073.0} | 0.8923 | {'precision': 0.8405556713522035, 'recall': 0.8508586156534647, 'f1-score': 0.8453672723733583, 'support': 29705.0} | {'precision': 0.8958622743855031, 'recall': 0.8923413566739606, 'f1-score': 0.8938537401175597, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2514 | {'precision': 0.6300216502285302, 'recall': 0.628358925143954, 'f1-score': 0.6291891891891892, 'support': 4168.0} | {'precision': 0.8886300093196645, 'recall': 0.8861524163568774, 'f1-score': 0.8873894834806887, 'support': 2152.0} | {'precision': 0.9999115592111082, 'recall': 0.9994695898161244, 'f1-score': 0.9996905256642645, 'support': 11312.0} | {'precision': 0.8906986357999174, 'recall': 0.8923217095999337, 'f1-score': 0.8915094339622642, 'support': 12073.0} | 0.8956 | {'precision': 0.8523154636398051, 'recall': 0.8515756602292224, 'f1-score': 0.8519446580741017, 'support': 29705.0} | {'precision': 0.8955618988728123, 'recall': 0.8956404645682545, 'f1-score': 0.8956005834550264, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2718 | {'precision': 0.6259613143789327, 'recall': 0.6444337811900192, 'f1-score': 0.6350632462466013, 'support': 4168.0} | {'precision': 0.8953323903818954, 'recall': 0.8824349442379182, 'f1-score': 0.8888368827521648, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8940518895470093, 'recall': 0.8876832601673155, 'f1-score': 0.8908561928512054, 'support': 12073.0} | 0.8957 | {'precision': 0.8538363985769593, 'recall': 0.8535053938528443, 'f1-score': 0.8536227616014088, 'support': 29705.0} | {'precision': 0.8968742812635675, 'recall': 0.8957414576670594, 'f1-score': 0.8962809830838163, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.2978 | {'precision': 0.6061165845648604, 'recall': 0.7084932821497121, 'f1-score': 0.6533185840707965, 'support': 4168.0} | {'precision': 0.9254881808838643, 'recall': 0.8368959107806692, 'f1-score': 0.8789653489507077, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.9068301528365426, 'recall': 0.8698749275242276, 'f1-score': 0.8879682083368562, 'support': 12073.0} | 0.8942 | {'precision': 0.8596087295713168, 'recall': 0.8536834275676832, 'f1-score': 0.854996716478506, 'support': 29705.0} | {'precision': 0.9014679321637432, 'recall': 0.8941928968187174, 'f1-score': 0.8969535321586781, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3142 | {'precision': 0.6124081279723304, 'recall': 0.6797024952015355, 'f1-score': 0.6443029338185126, 'support': 4168.0} | {'precision': 0.8891476478807638, 'recall': 0.8870817843866171, 'f1-score': 0.888113514770877, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9988507779349364, 'f1-score': 0.9994250585997965, 'support': 11312.0} | {'precision': 0.9043239061291154, 'recall': 0.8713658576989978, 'f1-score': 0.8875390196574707, 'support': 12073.0} | 0.8942 | {'precision': 0.8514699204955525, 'recall': 0.8592502288055217, 'f1-score': 0.8548451317116642, 'support': 29705.0} | {'precision': 0.8986993884640595, 'recall': 0.8941592324524491, 'f1-score': 0.8960589045328405, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3483 | {'precision': 0.6076411247048723, 'recall': 0.6792226487523992, 'f1-score': 0.6414410331936106, 'support': 4168.0} | {'precision': 0.9248466257668712, 'recall': 0.8406133828996283, 'f1-score': 0.8807205452775072, 'support': 2152.0} | {'precision': 0.9999115748518879, 'recall': 0.9996463932107497, 'f1-score': 0.9997789664471067, 'support': 11312.0} | {'precision': 0.89831083948731, 'recall': 0.8765841133106933, 'f1-score': 0.8873144965204998, 'support': 12073.0} | 0.8931 | {'precision': 0.8576775412027353, 'recall': 0.8490166345433676, 'f1-score': 0.8523137603596811, 'support': 29705.0} | {'precision': 0.8981391902465936, 'recall': 0.8931493014644, 'f1-score': 0.8951652726722716, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3626 | {'precision': 0.6293302540415704, 'recall': 0.6537907869481766, 'f1-score': 0.641327371146152, 'support': 4168.0} | {'precision': 0.8859416445623343, 'recall': 0.9312267657992565, 'f1-score': 0.9080199365654735, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8986440677966102, 'recall': 0.8783235318479251, 'f1-score': 0.8883676119465505, 'support': 12073.0} | 0.8970 | {'precision': 0.8534568931293429, 'recall': 0.8658352711488396, 'f1-score': 0.8594176801907871, 'support': 29705.0} | {'precision': 0.8984994053811064, 'recall': 0.8969870392189867, 'f1-score': 0.897622406583276, 'support': 29705.0} |
83
+ | No log | 11.0 | 451 | 0.3772 | {'precision': 0.6213443910955915, 'recall': 0.6830614203454894, 'f1-score': 0.6507428571428571, 'support': 4168.0} | {'precision': 0.8955895589558955, 'recall': 0.9247211895910781, 'f1-score': 0.9099222679469593, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9996463932107497, 'f1-score': 0.9998231653404068, 'support': 11312.0} | {'precision': 0.9045976020012076, 'recall': 0.8686324857119192, 'f1-score': 0.8862503169103356, 'support': 12073.0} | 0.8965 | {'precision': 0.8553828880131737, 'recall': 0.8690153722148091, 'f1-score': 0.8616846518351398, 'support': 29705.0} | {'precision': 0.9005311900999863, 'recall': 0.8965494024574987, 'f1-score': 0.8981702969729826, 'support': 29705.0} |
84
+ | No log | 12.0 | 492 | 0.4167 | {'precision': 0.6351280710925248, 'recall': 0.5830134357005758, 'f1-score': 0.6079559669752315, 'support': 4168.0} | {'precision': 0.9238427078148332, 'recall': 0.862453531598513, 'f1-score': 0.8920932468156694, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8730499840815027, 'recall': 0.9085562826140976, 'f1-score': 0.8904493241871981, 'support': 12073.0} | 0.8942 | {'precision': 0.8580051907472153, 'recall': 0.8383732099323278, 'f1-score': 0.8475583156334406, 'support': 29705.0} | {'precision': 0.8916901452734269, 'recall': 0.8941592324524491, 'f1-score': 0.8925480233154618, 'support': 29705.0} |
85
+ | 0.175 | 13.0 | 533 | 0.4178 | {'precision': 0.6449313621964097, 'recall': 0.5861324376199616, 'f1-score': 0.6141277023629965, 'support': 4168.0} | {'precision': 0.920619554695063, 'recall': 0.8838289962825279, 'f1-score': 0.9018492176386913, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9996463932107497, 'f1-score': 0.9998231653404068, 'support': 11312.0} | {'precision': 0.8746711313082994, 'recall': 0.9087219415224054, 'f1-score': 0.8913714657133571, 'support': 12073.0} | 0.8963 | {'precision': 0.860055512049943, 'recall': 0.8445824421589111, 'f1-score': 0.851792887763863, 'support': 29705.0} | {'precision': 0.8934910542879485, 'recall': 0.8962800875273523, 'f1-score': 0.8945292419355487, 'support': 29705.0} |
86
+
87
+
88
+ ### Framework versions
89
+
90
+ - Transformers 4.37.2
91
+ - Pytorch 2.2.0+cu121
92
+ - Datasets 2.17.0
93
+ - Tokenizers 0.15.2
meta_data/README_s42_e14.md ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8972900185154015
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.4170
36
+ - Claim: {'precision': 0.6411675893306492, 'recall': 0.6113243761996161, 'f1-score': 0.6258904446082044, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.918905715681485, 'recall': 0.8740706319702602, 'f1-score': 0.8959276018099548, 'support': 2152.0}
38
+ - O: {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8821437232236683, 'recall': 0.9039178331814793, 'f1-score': 0.8928980526918672, 'support': 12073.0}
40
+ - Accuracy: 0.8973
41
+ - Macro avg: {'precision': 0.8605321585881648, 'recall': 0.8473282103378389, 'f1-score': 0.8536679750537497, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8958422107843774, 'recall': 0.8972900185154015, 'f1-score': 0.8964216725962088, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 14
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3371 | {'precision': 0.5051114023591088, 'recall': 0.4623320537428023, 'f1-score': 0.48277589878491795, 'support': 4168.0} | {'precision': 0.6749773345421578, 'recall': 0.6919144981412639, 'f1-score': 0.6833409821018817, 'support': 2152.0} | {'precision': 0.9997326917936381, 'recall': 0.9918670438472419, 'f1-score': 0.995784335478145, 'support': 11312.0} | {'precision': 0.8627718481662788, 'recall': 0.890499461608548, 'f1-score': 0.8764164017282138, 'support': 12073.0} | 0.8546 | {'precision': 0.760648319215296, 'recall': 0.759153264334964, 'f1-score': 0.7595794045232896, 'support': 29705.0} | {'precision': 0.8511387403281807, 'recall': 0.854637266453459, 'f1-score': 0.8526526632086279, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2777 | {'precision': 0.6244221879815101, 'recall': 0.388915547024952, 'f1-score': 0.4793021880544057, 'support': 4168.0} | {'precision': 0.7427921092564491, 'recall': 0.9098513011152416, 'f1-score': 0.8178780284043441, 'support': 2152.0} | {'precision': 0.9998232278592893, 'recall': 1.0, 'f1-score': 0.9999116061168567, 'support': 11312.0} | {'precision': 0.8542442434835473, 'recall': 0.9310858941439576, 'f1-score': 0.8910114140773621, 'support': 12073.0} | 0.8797 | {'precision': 0.8053204421451989, 'recall': 0.8074631855710378, 'f1-score': 0.7970258091632422, 'support': 29705.0} | {'precision': 0.869361097584513, 'recall': 0.8797172193233462, 'f1-score': 0.8694154495030058, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2431 | {'precision': 0.610574601641719, 'recall': 0.6067658349328215, 'f1-score': 0.6086642599277977, 'support': 4168.0} | {'precision': 0.8699186991869918, 'recall': 0.7955390334572491, 'f1-score': 0.8310679611650487, 'support': 2152.0} | {'precision': 0.9999115435647944, 'recall': 0.9992927864214993, 'f1-score': 0.9996020692399523, 'support': 11312.0} | {'precision': 0.8864117168429617, 'recall': 0.9023440735525553, 'f1-score': 0.8943069408529326, 'support': 12073.0} | 0.8901 | {'precision': 0.8417041403091168, 'recall': 0.8259854320910314, 'f1-score': 0.8334103077964329, 'support': 29705.0} | {'precision': 0.8897353313766411, 'recall': 0.8900521797677159, 'f1-score': 0.8897437196420146, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2505 | {'precision': 0.6349911190053286, 'recall': 0.5146353166986565, 'f1-score': 0.5685131195335278, 'support': 4168.0} | {'precision': 0.8684456928838952, 'recall': 0.8619888475836431, 'f1-score': 0.8652052238805971, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8643422891753377, 'recall': 0.921974654187029, 'f1-score': 0.8922287683860366, 'support': 12073.0} | 0.8902 | {'precision': 0.8419226767953545, 'recall': 0.8246497046173321, 'f1-score': 0.8314757282262835, 'support': 29705.0} | {'precision': 0.8840849237740477, 'recall': 0.8901868372327891, 'f1-score': 0.8858731616505928, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2614 | {'precision': 0.6192818494835219, 'recall': 0.6041266794625719, 'f1-score': 0.6116103959193587, 'support': 4168.0} | {'precision': 0.8873449131513648, 'recall': 0.8308550185873605, 'f1-score': 0.8581713462922966, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.884087401510844, 'recall': 0.9015157790110163, 'f1-score': 0.892716535433071, 'support': 12073.0} | 0.8922 | {'precision': 0.8476564425656468, 'recall': 0.8341243692652373, 'f1-score': 0.8406135196874247, 'support': 29705.0} | {'precision': 0.8912748792655566, 'recall': 0.8921730348426191, 'f1-score': 0.8916089419894234, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2767 | {'precision': 0.6314779270633397, 'recall': 0.6314779270633397, 'f1-score': 0.6314779270633397, 'support': 4168.0} | {'precision': 0.8957934990439771, 'recall': 0.870817843866171, 'f1-score': 0.8831291234684261, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8910319815364326, 'recall': 0.895386399403628, 'f1-score': 0.8932038834951457, 'support': 12073.0} | 0.8964 | {'precision': 0.8545537534401515, 'recall': 0.8494205425832846, 'f1-score': 0.851941683782971, 'support': 29705.0} | {'precision': 0.8964206972370266, 'recall': 0.8964147449924256, 'f1-score': 0.8964027733122504, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.2939 | {'precision': 0.6163162705667276, 'recall': 0.6470729366602687, 'f1-score': 0.6313202247191011, 'support': 4168.0} | {'precision': 0.8980466888994759, 'recall': 0.8759293680297398, 'f1-score': 0.8868501529051988, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8941008643114878, 'recall': 0.8825478340097739, 'f1-score': 0.8882867861609004, 'support': 12073.0} | 0.8938 | {'precision': 0.852093857473637, 'recall': 0.8513875346749455, 'f1-score': 0.8516032412225432, 'support': 29705.0} | {'precision': 0.895703838190886, 'recall': 0.8937552600572294, 'f1-score': 0.8946517629052753, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3407 | {'precision': 0.6260224948875256, 'recall': 0.5875719769673704, 'f1-score': 0.6061881188118811, 'support': 4168.0} | {'precision': 0.8996990972918756, 'recall': 0.8336431226765799, 'f1-score': 0.8654124457308249, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8776114624189546, 'recall': 0.9081421353433281, 'f1-score': 0.8926158104697549, 'support': 12073.0} | 0.8925 | {'precision': 0.850833263649589, 'recall': 0.8322067062008507, 'f1-score': 0.8409877748920311, 'support': 29705.0} | {'precision': 0.8905173338443818, 'recall': 0.8925433428715704, 'f1-score': 0.8912475861436012, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3397 | {'precision': 0.6222800378429517, 'recall': 0.6312380038387716, 'f1-score': 0.6267270128632683, 'support': 4168.0} | {'precision': 0.8995675156174916, 'recall': 0.8698884758364313, 'f1-score': 0.8844790928419561, 'support': 2152.0} | {'precision': 0.9999115983026874, 'recall': 0.9999115983026874, 'f1-score': 0.9999115983026874, 'support': 11312.0} | {'precision': 0.8896888447533929, 'recall': 0.890499461608548, 'f1-score': 0.8900939686219315, 'support': 12073.0} | 0.8943 | {'precision': 0.852861999129131, 'recall': 0.8478843848966096, 'f1-score': 0.8503029181574608, 'support': 29705.0} | {'precision': 0.8948576305014636, 'recall': 0.8942938899175223, 'f1-score': 0.8945531621135354, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3688 | {'precision': 0.6277702397105382, 'recall': 0.6660268714011516, 'f1-score': 0.6463329452852155, 'support': 4168.0} | {'precision': 0.8976014760147601, 'recall': 0.904275092936803, 'f1-score': 0.9009259259259259, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.9005253346890357, 'recall': 0.8803114387476186, 'f1-score': 0.890303664921466, 'support': 12073.0} | 0.8976 | {'precision': 0.8564521641327977, 'recall': 0.8626533507713933, 'f1-score': 0.859379584309395, 'support': 29705.0} | {'precision': 0.8998898229116948, 'recall': 0.8975593334455478, 'f1-score': 0.8985976932246313, 'support': 29705.0} |
83
+ | No log | 11.0 | 451 | 0.3741 | {'precision': 0.6391675025075225, 'recall': 0.6115642994241842, 'f1-score': 0.6250613045610592, 'support': 4168.0} | {'precision': 0.8907678244972578, 'recall': 0.9056691449814126, 'f1-score': 0.8981566820276498, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8859692206941716, 'recall': 0.8964631823076286, 'f1-score': 0.8911853102227345, 'support': 12073.0} | 0.8966 | {'precision': 0.8539540384539521, 'recall': 0.8534241566783063, 'f1-score': 0.8535897744791039, 'support': 29705.0} | {'precision': 0.8950778992965517, 'recall': 0.8965830668237671, 'f1-score': 0.8957707109763514, 'support': 29705.0} |
84
+ | No log | 12.0 | 492 | 0.4274 | {'precision': 0.6285550129273197, 'recall': 0.5249520153550864, 'f1-score': 0.572100928225912, 'support': 4168.0} | {'precision': 0.9079259610821072, 'recall': 0.8889405204460966, 'f1-score': 0.8983329420051654, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8601999375195252, 'recall': 0.9122836080510229, 'f1-score': 0.8854765445994291, 'support': 12073.0} | 0.8896 | {'precision': 0.8491481294114522, 'recall': 0.8315440359630515, 'f1-score': 0.8389665539838698, 'support': 29705.0} | {'precision': 0.8843584546775584, 'recall': 0.8896482073724962, 'f1-score': 0.8860322338020225, 'support': 29705.0} |
85
+ | 0.1761 | 13.0 | 533 | 0.4068 | {'precision': 0.6263554926921263, 'recall': 0.6374760076775432, 'f1-score': 0.6318668252080857, 'support': 4168.0} | {'precision': 0.8843537414965986, 'recall': 0.9061338289962825, 'f1-score': 0.895111315125086, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8937630807869401, 'recall': 0.8842872525470057, 'f1-score': 0.8889999167291198, 'support': 12073.0} | 0.8953 | {'precision': 0.8510959802731305, 'recall': 0.8569742723052078, 'f1-score': 0.853983464541816, 'support': 29705.0} | {'precision': 0.8959831916504318, 'recall': 0.8953038209055715, 'f1-score': 0.895616781497613, 'support': 29705.0} |
86
+ | 0.1761 | 14.0 | 574 | 0.4170 | {'precision': 0.6411675893306492, 'recall': 0.6113243761996161, 'f1-score': 0.6258904446082044, 'support': 4168.0} | {'precision': 0.918905715681485, 'recall': 0.8740706319702602, 'f1-score': 0.8959276018099548, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8821437232236683, 'recall': 0.9039178331814793, 'f1-score': 0.8928980526918672, 'support': 12073.0} | 0.8973 | {'precision': 0.8605321585881648, 'recall': 0.8473282103378389, 'f1-score': 0.8536679750537497, 'support': 29705.0} | {'precision': 0.8958422107843774, 'recall': 0.8972900185154015, 'f1-score': 0.8964216725962088, 'support': 29705.0} |
87
+
88
+
89
+ ### Framework versions
90
+
91
+ - Transformers 4.37.2
92
+ - Pytorch 2.2.0+cu121
93
+ - Datasets 2.17.0
94
+ - Tokenizers 0.15.2
meta_data/README_s42_e15.md ADDED
@@ -0,0 +1,95 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8996128597879145
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.4440
36
+ - Claim: {'precision': 0.6588266384778013, 'recall': 0.5981285988483686, 'f1-score': 0.6270120724346077, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.9058606368251039, 'recall': 0.9121747211895911, 'f1-score': 0.9090067145172495, 'support': 2152.0}
38
+ - O: {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8805334618783642, 'recall': 0.9078108175267124, 'f1-score': 0.8939641109298531, 'support': 12073.0}
40
+ - Accuracy: 0.8996
41
+ - Macro avg: {'precision': 0.8613051842953173, 'recall': 0.8544180322695273, 'f1-score': 0.857440461196189, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8967541492974446, 'recall': 0.8996128597879145, 'f1-score': 0.8978925071931304, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 15
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3459 | {'precision': 0.5076716016150741, 'recall': 0.45249520153550865, 'f1-score': 0.4784980337434987, 'support': 4168.0} | {'precision': 0.6691762621789193, 'recall': 0.7021375464684015, 'f1-score': 0.6852607709750567, 'support': 2152.0} | {'precision': 0.9995503597122302, 'recall': 0.98258486562942, 'f1-score': 0.9909950071326675, 'support': 11312.0} | {'precision': 0.8565651760228354, 'recall': 0.8948065932245507, 'f1-score': 0.8752683816082643, 'support': 12073.0} | 0.8522 | {'precision': 0.7582408498822648, 'recall': 0.7580060517144703, 'f1-score': 0.7575055483648718, 'support': 29705.0} | {'precision': 0.8484856957054067, 'recall': 0.8522134320821411, 'f1-score': 0.8499010831719419, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2620 | {'precision': 0.6607431340872375, 'recall': 0.3925143953934741, 'f1-score': 0.49247441300421435, 'support': 4168.0} | {'precision': 0.7220035778175313, 'recall': 0.9377323420074349, 'f1-score': 0.815847988680008, 'support': 2152.0} | {'precision': 0.9998230714791224, 'recall': 0.9991159830268741, 'f1-score': 0.9994694021931375, 'support': 11312.0} | {'precision': 0.8597867479055598, 'recall': 0.9350617079433446, 'f1-score': 0.8958457326508749, 'support': 12073.0} | 0.8835 | {'precision': 0.8105891328223628, 'recall': 0.8161061070927819, 'f1-score': 0.8009093841320587, 'support': 29705.0} | {'precision': 0.8752039412346267, 'recall': 0.8835212927116647, 'f1-score': 0.8729130325852121, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2374 | {'precision': 0.6255685898970553, 'recall': 0.6269193857965452, 'f1-score': 0.6262432594367885, 'support': 4168.0} | {'precision': 0.8762836185819071, 'recall': 0.8327137546468402, 'f1-score': 0.8539432928282107, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9992927864214993, 'f1-score': 0.9996462681287585, 'support': 11312.0} | {'precision': 0.8920272600377699, 'recall': 0.8998591899279383, 'f1-score': 0.8959261091868711, 'support': 12073.0} | 0.8946 | {'precision': 0.848469867129183, 'recall': 0.8396962791982058, 'f1-score': 0.8439397323951572, 'support': 29705.0} | {'precision': 0.894616305009769, 'recall': 0.8945632048476687, 'f1-score': 0.8945424128188674, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2502 | {'precision': 0.6409472880061116, 'recall': 0.6038867562380038, 'f1-score': 0.6218653489808523, 'support': 4168.0} | {'precision': 0.8598290598290599, 'recall': 0.9349442379182156, 'f1-score': 0.8958147818343721, 'support': 2152.0} | {'precision': 0.9998231340643792, 'recall': 0.9994695898161244, 'f1-score': 0.9996463306808134, 'support': 11312.0} | {'precision': 0.8900247320692498, 'recall': 0.8942267870454734, 'f1-score': 0.8921208114696525, 'support': 12073.0} | 0.8965 | {'precision': 0.8476560534922002, 'recall': 0.8581318427544543, 'f1-score': 0.8523618182414225, 'support': 29705.0} | {'precision': 0.8947008354139008, 'recall': 0.8965157380912304, 'f1-score': 0.8954149818075824, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2594 | {'precision': 0.6565992865636148, 'recall': 0.6624280230326296, 'f1-score': 0.6595007763047892, 'support': 4168.0} | {'precision': 0.8777137793531237, 'recall': 0.9205390334572491, 'f1-score': 0.8986164663188932, 'support': 2152.0} | {'precision': 0.9999115670321896, 'recall': 0.999557991513437, 'f1-score': 0.9997347480106101, 'support': 11312.0} | {'precision': 0.9036447423544198, 'recall': 0.8933156630497805, 'f1-score': 0.8984505164945018, 'support': 12073.0} | 0.9033 | {'precision': 0.8594673438258369, 'recall': 0.868960177763274, 'f1-score': 0.8640756267821986, 'support': 29705.0} | {'precision': 0.903761942443296, 'recall': 0.9033496044436964, 'f1-score': 0.9035049461804666, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2753 | {'precision': 0.637114951164538, 'recall': 0.6103646833013435, 'f1-score': 0.6234530082097782, 'support': 4168.0} | {'precision': 0.9366306027820711, 'recall': 0.8447955390334573, 'f1-score': 0.8883459565111166, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8789410348977136, 'recall': 0.9074794997100969, 'f1-score': 0.8929823131469556, 'support': 12073.0} | 0.8963 | {'precision': 0.8631716472110806, 'recall': 0.8405273279652555, 'f1-score': 0.8511290006058785, 'support': 29705.0} | {'precision': 0.8952896579013939, 'recall': 0.8962800875273523, 'f1-score': 0.895480468184721, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.2966 | {'precision': 0.6248019914007694, 'recall': 0.6624280230326296, 'f1-score': 0.643065098404565, 'support': 4168.0} | {'precision': 0.8643994834266036, 'recall': 0.9330855018587361, 'f1-score': 0.8974301675977654, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0} | {'precision': 0.9038098506950404, 'recall': 0.8724426406029985, 'f1-score': 0.8878492856239727, 'support': 12073.0} | 0.8958 | {'precision': 0.8482528313806034, 'recall': 0.8669227401006067, 'f1-score': 0.8570529828736417, 'support': 29705.0} | {'precision': 0.898436583603221, 'recall': 0.8958424507658643, 'f1-score': 0.8968547139279125, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3421 | {'precision': 0.6529668636013357, 'recall': 0.6098848368522073, 'f1-score': 0.6306909812678327, 'support': 4168.0} | {'precision': 0.9046478198370868, 'recall': 0.8773234200743495, 'f1-score': 0.8907761264449163, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9999115983026874, 'f1-score': 0.9999557971975424, 'support': 11312.0} | {'precision': 0.8831158369582729, 'recall': 0.9080593058891742, 'f1-score': 0.8954138930861273, 'support': 12073.0} | 0.8990 | {'precision': 0.8601826300991738, 'recall': 0.8487947902796046, 'f1-score': 0.8542091994991047, 'support': 29705.0} | {'precision': 0.8968936372791452, 'recall': 0.8989732368288167, 'f1-score': 0.8977445596081872, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3315 | {'precision': 0.6249177451195438, 'recall': 0.6835412667946257, 'f1-score': 0.6529162369657385, 'support': 4168.0} | {'precision': 0.9221616261774913, 'recall': 0.8643122676579925, 'f1-score': 0.8923003118253778, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9990275813295615, 'f1-score': 0.9995135541502675, 'support': 11312.0} | {'precision': 0.9006594521474467, 'recall': 0.8823821751014661, 'f1-score': 0.8914271369398771, 'support': 12073.0} | 0.8976 | {'precision': 0.8619347058611204, 'recall': 0.8573158227209113, 'f1-score': 0.8590393099703152, 'support': 29705.0} | {'precision': 0.901357029017618, 'recall': 0.8975929978118162, 'f1-score': 0.8991847263270282, 'support': 29705.0} |
82
+ | No log | 10.0 | 410 | 0.3580 | {'precision': 0.6654402102496715, 'recall': 0.6074856046065259, 'f1-score': 0.635143609682679, 'support': 4168.0} | {'precision': 0.8756989247311828, 'recall': 0.9460966542750929, 'f1-score': 0.9095376368103641, 'support': 2152.0} | {'precision': 0.9999115592111082, 'recall': 0.9994695898161244, 'f1-score': 0.9996905256642645, 'support': 11312.0} | {'precision': 0.8878382784479948, 'recall': 0.9021784146442475, 'f1-score': 0.8949509058789696, 'support': 12073.0} | 0.9011 | {'precision': 0.8572222431599893, 'recall': 0.8638075658354977, 'f1-score': 0.8598306695090694, 'support': 29705.0} | {'precision': 0.898432249649582, 'recall': 0.9010604275374516, 'f1-score': 0.8994393224226316, 'support': 29705.0} |
83
+ | No log | 11.0 | 451 | 0.3818 | {'precision': 0.6605737496826606, 'recall': 0.6242802303262955, 'f1-score': 0.6419143949673121, 'support': 4168.0} | {'precision': 0.9070716228467816, 'recall': 0.9298327137546468, 'f1-score': 0.9183111519045434, 'support': 2152.0} | {'precision': 0.9999115826702034, 'recall': 0.9997347949080623, 'f1-score': 0.9998231809742728, 'support': 11312.0} | {'precision': 0.8877551020408163, 'recall': 0.9007703139236313, 'f1-score': 0.8942153517247049, 'support': 12073.0} | 0.9018 | {'precision': 0.8638280143101154, 'recall': 0.8636545132281589, 'f1-score': 0.8635660198927083, 'support': 29705.0} | {'precision': 0.8999884427250537, 'recall': 0.901767379229086, 'f1-score': 0.9007765211808001, 'support': 29705.0} |
84
+ | No log | 12.0 | 492 | 0.4293 | {'precision': 0.6597707576181158, 'recall': 0.5662188099808061, 'f1-score': 0.6094254357650096, 'support': 4168.0} | {'precision': 0.9170684667309547, 'recall': 0.8838289962825279, 'f1-score': 0.9001419782300046, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8703326011923439, 'recall': 0.9189927938374887, 'f1-score': 0.8940010475001007, 'support': 12073.0} | 0.8976 | {'precision': 0.8617929563853536, 'recall': 0.8421275474792367, 'f1-score': 0.8508257965126946, 'support': 29705.0} | {'precision': 0.8935526460983838, 'recall': 0.8975929978118162, 'f1-score': 0.8947808316465885, 'support': 29705.0} |
85
+ | 0.1673 | 13.0 | 533 | 0.4672 | {'precision': 0.686539643515673, 'recall': 0.5359884836852208, 'f1-score': 0.6019940716787928, 'support': 4168.0} | {'precision': 0.9140989729225023, 'recall': 0.9098513011152416, 'f1-score': 0.911970190964136, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9993811881188119, 'f1-score': 0.9996904982977407, 'support': 11312.0} | {'precision': 0.8640418332820671, 'recall': 0.9306717468731881, 'f1-score': 0.896119950552299, 'support': 12073.0} | 0.8999 | {'precision': 0.8661701124300606, 'recall': 0.8439731799481155, 'f1-score': 0.852443677873242, 'support': 29705.0} | {'precision': 0.8945367876491145, 'recall': 0.8999495034505975, 'f1-score': 0.8954393610999487, 'support': 29705.0} |
86
+ | 0.1673 | 14.0 | 574 | 0.4210 | {'precision': 0.6521739130434783, 'recall': 0.6477927063339731, 'f1-score': 0.6499759268175253, 'support': 4168.0} | {'precision': 0.9036697247706422, 'recall': 0.9154275092936803, 'f1-score': 0.909510618651893, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8944449043795016, 'recall': 0.8948894226787045, 'f1-score': 0.8946671083140114, 'support': 12073.0} | 0.9015 | {'precision': 0.8625721355484055, 'recall': 0.8643948070306205, 'f1-score': 0.8634720945847734, 'support': 29705.0} | {'precision': 0.9013159888182246, 'recall': 0.9015317286652079, 'f1-score': 0.9014200207764028, 'support': 29705.0} |
87
+ | 0.1673 | 15.0 | 615 | 0.4440 | {'precision': 0.6588266384778013, 'recall': 0.5981285988483686, 'f1-score': 0.6270120724346077, 'support': 4168.0} | {'precision': 0.9058606368251039, 'recall': 0.9121747211895911, 'f1-score': 0.9090067145172495, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0} | {'precision': 0.8805334618783642, 'recall': 0.9078108175267124, 'f1-score': 0.8939641109298531, 'support': 12073.0} | 0.8996 | {'precision': 0.8613051842953173, 'recall': 0.8544180322695273, 'f1-score': 0.857440461196189, 'support': 29705.0} | {'precision': 0.8967541492974446, 'recall': 0.8996128597879145, 'f1-score': 0.8978925071931304, 'support': 29705.0} |
88
+
89
+
90
+ ### Framework versions
91
+
92
+ - Transformers 4.37.2
93
+ - Pytorch 2.2.0+cu121
94
+ - Datasets 2.17.0
95
+ - Tokenizers 0.15.2
meta_data/README_s42_e4.md CHANGED
@@ -1,5 +1,4 @@
1
  ---
2
- license: apache-2.0
3
  base_model: allenai/longformer-base-4096
4
  tags:
5
  - generated_from_trainer
@@ -17,12 +16,12 @@ model-index:
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
- split: test
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
- value: 0.8875012488760116
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,14 +31,14 @@ should probably proofread and complete it, then remove this comment. -->
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 0.2595
36
- - Claim: {'precision': 0.6180851063829788, 'recall': 0.5465663217309501, 'f1-score': 0.5801298052920619, 'support': 4252.0}
37
- - Majorclaim: {'precision': 0.8535564853556485, 'recall': 0.8414298808432631, 'f1-score': 0.8474498038310638, 'support': 2182.0}
38
- - O: {'precision': 0.9998243611135506, 'recall': 0.9992978144474678, 'f1-score': 0.9995610184372257, 'support': 11393.0}
39
- - Premise: {'precision': 0.8723387540262393, 'recall': 0.9101639344262296, 'f1-score': 0.8908500140398733, 'support': 12200.0}
40
- - Accuracy: 0.8875
41
- - Macro avg: {'precision': 0.8359511767196043, 'recall': 0.8243644878619776, 'f1-score': 0.8294976604000562, 'support': 30027.0}
42
- - Weighted avg: {'precision': 0.8833413217661854, 'recall': 0.8875012488760116, 'f1-score': 0.884944092263729, 'support': 30027.0}
43
 
44
  ## Model description
45
 
@@ -68,12 +67,12 @@ The following hyperparameters were used during training:
68
 
69
  ### Training results
70
 
71
- | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
- |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
- | No log | 1.0 | 41 | 0.3497 | {'precision': 0.5091044221479004, 'recall': 0.3222013170272813, 'f1-score': 0.39464208555379515, 'support': 4252.0} | {'precision': 0.6774921064501579, 'recall': 0.6883593033913841, 'f1-score': 0.682882473289384, 'support': 2182.0} | {'precision': 0.9962913907284768, 'recall': 0.9903449486526815, 'f1-score': 0.9933092701822344, 'support': 11393.0} | {'precision': 0.8249963752356096, 'recall': 0.9327868852459016, 'f1-score': 0.875586673847811, 'support': 12200.0} | 0.8504 | {'precision': 0.7519710736405363, 'recall': 0.7334231135793121, 'f1-score': 0.7366051257183062, 'support': 30027.0} | {'precision': 0.8345390272651644, 'recall': 0.8504013054917241, 'f1-score': 0.8381455903227649, 'support': 30027.0} |
74
- | No log | 2.0 | 82 | 0.2760 | {'precision': 0.5628667225481978, 'recall': 0.6317027281279398, 'f1-score': 0.5953014184397162, 'support': 4252.0} | {'precision': 0.8442959917780062, 'recall': 0.7529789184234648, 'f1-score': 0.7960271317829457, 'support': 2182.0} | {'precision': 0.9999119795792624, 'recall': 0.9971034845958044, 'f1-score': 0.9985057572294981, 'support': 11393.0} | {'precision': 0.8920321392701708, 'recall': 0.8736065573770492, 'f1-score': 0.8827232068908398, 'support': 12200.0} | 0.8774 | {'precision': 0.8247767082939093, 'recall': 0.8138479221310645, 'f1-score': 0.8181393785857499, 'support': 30027.0} | {'precision': 0.8828838192552426, 'recall': 0.8774436340626769, 'f1-score': 0.8796533802557692, 'support': 30027.0} |
75
- | No log | 3.0 | 123 | 0.2556 | {'precision': 0.622185154295246, 'recall': 0.526340545625588, 'f1-score': 0.570263727863422, 'support': 4252.0} | {'precision': 0.8404255319148937, 'recall': 0.8327222731439047, 'f1-score': 0.8365561694290976, 'support': 2182.0} | {'precision': 0.9998242839571253, 'recall': 0.9988589484771351, 'f1-score': 0.9993413830954994, 'support': 11393.0} | {'precision': 0.8682290858295825, 'recall': 0.9170491803278689, 'f1-score': 0.8919716176353345, 'support': 12200.0} | 0.8866 | {'precision': 0.8326660139992118, 'recall': 0.8187427368936242, 'f1-score': 0.8245332245058383, 'support': 30027.0} | {'precision': 0.8812979219018255, 'recall': 0.8866353615079762, 'f1-score': 0.8831277531997092, 'support': 30027.0} |
76
- | No log | 4.0 | 164 | 0.2595 | {'precision': 0.6180851063829788, 'recall': 0.5465663217309501, 'f1-score': 0.5801298052920619, 'support': 4252.0} | {'precision': 0.8535564853556485, 'recall': 0.8414298808432631, 'f1-score': 0.8474498038310638, 'support': 2182.0} | {'precision': 0.9998243611135506, 'recall': 0.9992978144474678, 'f1-score': 0.9995610184372257, 'support': 11393.0} | {'precision': 0.8723387540262393, 'recall': 0.9101639344262296, 'f1-score': 0.8908500140398733, 'support': 12200.0} | 0.8875 | {'precision': 0.8359511767196043, 'recall': 0.8243644878619776, 'f1-score': 0.8294976604000562, 'support': 30027.0} | {'precision': 0.8833413217661854, 'recall': 0.8875012488760116, 'f1-score': 0.884944092263729, 'support': 30027.0} |
77
 
78
 
79
  ### Framework versions
 
1
  ---
 
2
  base_model: allenai/longformer-base-4096
3
  tags:
4
  - generated_from_trainer
 
16
  name: essays_su_g
17
  type: essays_su_g
18
  config: sep_tok
19
+ split: train[80%:100%]
20
  args: sep_tok
21
  metrics:
22
  - name: Accuracy
23
  type: accuracy
24
+ value: 0.888301632721764
25
  ---
26
 
27
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
31
 
32
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
33
  It achieves the following results on the evaluation set:
34
+ - Loss: 0.2543
35
+ - Claim: {'precision': 0.6146161934805467, 'recall': 0.5609404990403071, 'f1-score': 0.5865529352734571, 'support': 4168.0}
36
+ - Majorclaim: {'precision': 0.8342541436464088, 'recall': 0.8420074349442379, 'f1-score': 0.8381128584643849, 'support': 2152.0}
37
+ - O: {'precision': 0.9999115122555526, 'recall': 0.998939179632249, 'f1-score': 0.9994251094503163, 'support': 11312.0}
38
+ - Premise: {'precision': 0.8800289668490505, 'recall': 0.9059057400811729, 'f1-score': 0.8927798865352434, 'support': 12073.0}
39
+ - Accuracy: 0.8883
40
+ - Macro avg: {'precision': 0.8322027040578897, 'recall': 0.8269482134244918, 'f1-score': 0.8292176974308504, 'support': 29705.0}
41
+ - Weighted avg: {'precision': 0.8851245229744956, 'recall': 0.888301632721764, 'f1-score': 0.8864635554242416, 'support': 29705.0}
42
 
43
  ## Model description
44
 
 
67
 
68
  ### Training results
69
 
70
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
71
+ |:-------------:|:-----:|:----:|:---------------:|:------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
72
+ | No log | 1.0 | 41 | 0.4315 | {'precision': 0.3877917414721723, 'recall': 0.2072936660268714, 'f1-score': 0.2701688555347092, 'support': 4168.0} | {'precision': 0.6876106194690266, 'recall': 0.36105947955390333, 'f1-score': 0.473491773308958, 'support': 2152.0} | {'precision': 0.9996342021033379, 'recall': 0.9663189533239038, 'f1-score': 0.982694295860116, 'support': 11312.0} | {'precision': 0.7669997404619777, 'recall': 0.9791269775532179, 'f1-score': 0.8601782790613062, 'support': 12073.0} | 0.8212 | {'precision': 0.7105090758766286, 'recall': 0.6284497691144741, 'f1-score': 0.6466333009412724, 'support': 29705.0} | {'precision': 0.7966303313362657, 'recall': 0.8211748863827638, 'f1-score': 0.7960339445852997, 'support': 29705.0} |
73
+ | No log | 2.0 | 82 | 0.2780 | {'precision': 0.6238676644348169, 'recall': 0.3800383877159309, 'f1-score': 0.4723423289100939, 'support': 4168.0} | {'precision': 0.7306525037936267, 'recall': 0.8949814126394052, 'f1-score': 0.8045112781954887, 'support': 2152.0} | {'precision': 0.9998226164079823, 'recall': 0.996552333804809, 'f1-score': 0.9981847965643954, 'support': 11312.0} | {'precision': 0.8501697472651829, 'recall': 0.9334051188602667, 'f1-score': 0.8898452305748579, 'support': 12073.0} | 0.8770 | {'precision': 0.8011281329754022, 'recall': 0.8012443132551029, 'f1-score': 0.791220908561209, 'support': 29705.0} | {'precision': 0.8667475983527302, 'recall': 0.8770240700218819, 'f1-score': 0.866338966000359, 'support': 29705.0} |
74
+ | No log | 3.0 | 123 | 0.2682 | {'precision': 0.6295336787564767, 'recall': 0.46641074856046066, 'f1-score': 0.535832414553473, 'support': 4168.0} | {'precision': 0.7360406091370558, 'recall': 0.9433085501858736, 'f1-score': 0.8268839103869654, 'support': 2152.0} | {'precision': 0.9999115200849407, 'recall': 0.9990275813295615, 'f1-score': 0.9994693552666489, 'support': 11312.0} | {'precision': 0.8739348570518436, 'recall': 0.9089704298848671, 'f1-score': 0.8911084043848965, 'support': 12073.0} | 0.8837 | {'precision': 0.8098551662575791, 'recall': 0.8294293274901907, 'f1-score': 0.8133235211479959, 'support': 29705.0} | {'precision': 0.8776256659925162, 'recall': 0.8836559501767379, 'f1-score': 0.8778708228219765, 'support': 29705.0} |
75
+ | No log | 4.0 | 164 | 0.2543 | {'precision': 0.6146161934805467, 'recall': 0.5609404990403071, 'f1-score': 0.5865529352734571, 'support': 4168.0} | {'precision': 0.8342541436464088, 'recall': 0.8420074349442379, 'f1-score': 0.8381128584643849, 'support': 2152.0} | {'precision': 0.9999115122555526, 'recall': 0.998939179632249, 'f1-score': 0.9994251094503163, 'support': 11312.0} | {'precision': 0.8800289668490505, 'recall': 0.9059057400811729, 'f1-score': 0.8927798865352434, 'support': 12073.0} | 0.8883 | {'precision': 0.8322027040578897, 'recall': 0.8269482134244918, 'f1-score': 0.8292176974308504, 'support': 29705.0} | {'precision': 0.8851245229744956, 'recall': 0.888301632721764, 'f1-score': 0.8864635554242416, 'support': 29705.0} |
76
 
77
 
78
  ### Framework versions
meta_data/README_s42_e5.md CHANGED
@@ -1,5 +1,4 @@
1
  ---
2
- license: apache-2.0
3
  base_model: allenai/longformer-base-4096
4
  tags:
5
  - generated_from_trainer
@@ -17,12 +16,12 @@ model-index:
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
- split: test
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
- value: 0.8921304159589702
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,14 +31,14 @@ should probably proofread and complete it, then remove this comment. -->
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 0.2544
36
- - Claim: {'precision': 0.618502404396611, 'recall': 0.6352304797742239, 'f1-score': 0.6267548439494142, 'support': 4252.0}
37
- - Majorclaim: {'precision': 0.8701787394167451, 'recall': 0.847846012832264, 'f1-score': 0.8588672237697308, 'support': 2182.0}
38
- - O: {'precision': 1.0, 'recall': 0.9994733608356008, 'f1-score': 0.9997366110623354, 'support': 11393.0}
39
- - Premise: {'precision': 0.8932246645262205, 'recall': 0.889344262295082, 'f1-score': 0.8912802398652812, 'support': 12200.0}
40
- - Accuracy: 0.8921
41
- - Macro avg: {'precision': 0.8454764520848941, 'recall': 0.8429735289342927, 'f1-score': 0.8441597296616904, 'support': 30027.0}
42
- - Weighted avg: {'precision': 0.8931609265035342, 'recall': 0.8921304159589702, 'f1-score': 0.8926175780107263, 'support': 30027.0}
43
 
44
  ## Model description
45
 
@@ -68,13 +67,13 @@ The following hyperparameters were used during training:
68
 
69
  ### Training results
70
 
71
- | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
- |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
- | No log | 1.0 | 41 | 0.3491 | {'precision': 0.5206252713851498, 'recall': 0.28198494825964254, 'f1-score': 0.3658276125095347, 'support': 4252.0} | {'precision': 0.626488632262721, 'recall': 0.7956003666361137, 'f1-score': 0.7009892994144962, 'support': 2182.0} | {'precision': 0.9948045086297992, 'recall': 0.9915737733696129, 'f1-score': 0.9931865136929096, 'support': 11393.0} | {'precision': 0.8307714937118482, 'recall': 0.9259016393442623, 'f1-score': 0.8757607473737257, 'support': 12200.0} | 0.8502 | {'precision': 0.7431724764973796, 'recall': 0.7487651819024079, 'f1-score': 0.7339410432476665, 'support': 30027.0} | {'precision': 0.8342464062220922, 'recall': 0.8501681819695607, 'f1-score': 0.8354052262355796, 'support': 30027.0} |
74
- | No log | 2.0 | 82 | 0.2801 | {'precision': 0.5533096401599289, 'recall': 0.5858419567262465, 'f1-score': 0.5691112634224355, 'support': 4252.0} | {'precision': 0.7518549051937345, 'recall': 0.8359303391384051, 'f1-score': 0.7916666666666666, 'support': 2182.0} | {'precision': 0.9982446901878181, 'recall': 0.9983323093127359, 'f1-score': 0.9982884978277087, 'support': 11393.0} | {'precision': 0.8966253737718923, 'recall': 0.8602459016393442, 'f1-score': 0.8780589834762602, 'support': 12200.0} | 0.8720 | {'precision': 0.8000086523283435, 'recall': 0.820087626704183, 'f1-score': 0.8092813528482677, 'support': 30027.0} | {'precision': 0.8760466016724828, 'recall': 0.8720151863323009, 'f1-score': 0.8736503218070512, 'support': 30027.0} |
75
- | No log | 3.0 | 123 | 0.2615 | {'precision': 0.6233933161953727, 'recall': 0.45625587958607716, 'f1-score': 0.5268875611080934, 'support': 4252.0} | {'precision': 0.8267020335985853, 'recall': 0.8570119156736938, 'f1-score': 0.8415841584158417, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9987711752830686, 'f1-score': 0.9993852099069033, 'support': 11393.0} | {'precision': 0.8530962784390538, 'recall': 0.9281967213114755, 'f1-score': 0.8890633587186937, 'support': 12200.0} | 0.8830 | {'precision': 0.825797907058253, 'recall': 0.8100589229635788, 'f1-score': 0.814230072037383, 'support': 30027.0} | {'precision': 0.8743899428757883, 'recall': 0.8829719918739801, 'f1-score': 0.8761858066517598, 'support': 30027.0} |
76
- | No log | 4.0 | 164 | 0.2551 | {'precision': 0.6193501099438065, 'recall': 0.5961900282220132, 'f1-score': 0.6075494307968844, 'support': 4252.0} | {'precision': 0.8261056247316445, 'recall': 0.8817598533455545, 'f1-score': 0.8530259365994236, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9994733608356008, 'f1-score': 0.9997366110623354, 'support': 11393.0} | {'precision': 0.8906531347192667, 'recall': 0.8919672131147541, 'f1-score': 0.8913096895732656, 'support': 12200.0} | 0.8901 | {'precision': 0.8340272173486795, 'recall': 0.8423476138794807, 'f1-score': 0.8379054170079773, 'support': 30027.0} | {'precision': 0.8890334493695863, 'recall': 0.890132214340427, 'f1-score': 0.88948546961186, 'support': 30027.0} |
77
- | No log | 5.0 | 205 | 0.2544 | {'precision': 0.618502404396611, 'recall': 0.6352304797742239, 'f1-score': 0.6267548439494142, 'support': 4252.0} | {'precision': 0.8701787394167451, 'recall': 0.847846012832264, 'f1-score': 0.8588672237697308, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9994733608356008, 'f1-score': 0.9997366110623354, 'support': 11393.0} | {'precision': 0.8932246645262205, 'recall': 0.889344262295082, 'f1-score': 0.8912802398652812, 'support': 12200.0} | 0.8921 | {'precision': 0.8454764520848941, 'recall': 0.8429735289342927, 'f1-score': 0.8441597296616904, 'support': 30027.0} | {'precision': 0.8931609265035342, 'recall': 0.8921304159589702, 'f1-score': 0.8926175780107263, 'support': 30027.0} |
78
 
79
 
80
  ### Framework versions
 
1
  ---
 
2
  base_model: allenai/longformer-base-4096
3
  tags:
4
  - generated_from_trainer
 
16
  name: essays_su_g
17
  type: essays_su_g
18
  config: sep_tok
19
+ split: train[80%:100%]
20
  args: sep_tok
21
  metrics:
22
  - name: Accuracy
23
  type: accuracy
24
+ value: 0.8854738259552264
25
  ---
26
 
27
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
31
 
32
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
33
  It achieves the following results on the evaluation set:
34
+ - Loss: 0.2621
35
+ - Claim: {'precision': 0.5981237322515213, 'recall': 0.565978886756238, 'f1-score': 0.5816074950690335, 'support': 4168.0}
36
+ - Majorclaim: {'precision': 0.8415746519443111, 'recall': 0.8145910780669146, 'f1-score': 0.8278630460448643, 'support': 2152.0}
37
+ - O: {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0}
38
+ - Premise: {'precision': 0.8798415137058301, 'recall': 0.9012672906485546, 'f1-score': 0.8904255319148937, 'support': 12073.0}
39
+ - Accuracy: 0.8855
40
+ - Macro avg: {'precision': 0.8298628720971998, 'recall': 0.8204151130192705, 'f1-score': 0.8249408661553546, 'support': 29705.0}
41
+ - Weighted avg: {'precision': 0.8832645976626653, 'recall': 0.8854738259552264, 'f1-score': 0.8842386364262106, 'support': 29705.0}
42
 
43
  ## Model description
44
 
 
67
 
68
  ### Training results
69
 
70
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
71
+ |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
72
+ | No log | 1.0 | 41 | 0.3617 | {'precision': 0.4788679245283019, 'recall': 0.30446257197696736, 'f1-score': 0.37224992666471113, 'support': 4168.0} | {'precision': 0.6963803349540789, 'recall': 0.5989776951672863, 'f1-score': 0.6440169872595554, 'support': 2152.0} | {'precision': 0.9991923180472045, 'recall': 0.9842644978783592, 'f1-score': 0.9916722333556, 'support': 11312.0} | {'precision': 0.8126733518241945, 'recall': 0.9464921726165825, 'f1-score': 0.8744929976276117, 'support': 12073.0} | 0.8456 | {'precision': 0.7467784823384449, 'recall': 0.7085492344097988, 'f1-score': 0.7206080362268695, 'support': 29705.0} | {'precision': 0.8284396858636128, 'recall': 0.8456152162935533, 'f1-score': 0.8319479048980907, 'support': 29705.0} |
73
+ | No log | 2.0 | 82 | 0.2796 | {'precision': 0.5955649419218585, 'recall': 0.4059500959692898, 'f1-score': 0.4828078185190469, 'support': 4168.0} | {'precision': 0.760759493670886, 'recall': 0.8378252788104089, 'f1-score': 0.7974347633790357, 'support': 2152.0} | {'precision': 0.9999115357395613, 'recall': 0.9992043847241867, 'f1-score': 0.999557835160948, 'support': 11312.0} | {'precision': 0.8505686125852919, 'recall': 0.9292636461525718, 'f1-score': 0.8881763844357361, 'support': 12073.0} | 0.8758 | {'precision': 0.8017011459793993, 'recall': 0.7930608514141144, 'f1-score': 0.7919942003736917, 'support': 29705.0} | {'precision': 0.8651534509455714, 'recall': 0.8758458172024912, 'f1-score': 0.8671393475513334, 'support': 29705.0} |
74
+ | No log | 3.0 | 123 | 0.2584 | {'precision': 0.6091815161582603, 'recall': 0.48392514395393477, 'f1-score': 0.5393769220484022, 'support': 4168.0} | {'precision': 0.7808161548169962, 'recall': 0.862453531598513, 'f1-score': 0.8196069772576728, 'support': 2152.0} | {'precision': 0.9999115748518879, 'recall': 0.9996463932107497, 'f1-score': 0.9997789664471067, 'support': 11312.0} | {'precision': 0.8697670758577274, 'recall': 0.9155139567630249, 'f1-score': 0.892054396513458, 'support': 12073.0} | 0.8832 | {'precision': 0.8149190804212179, 'recall': 0.8153847563815556, 'f1-score': 0.8127043155666599, 'support': 29705.0} | {'precision': 0.8763198978646256, 'recall': 0.8831509846827134, 'f1-score': 0.8783433638684701, 'support': 29705.0} |
75
+ | No log | 4.0 | 164 | 0.2543 | {'precision': 0.5829736211031175, 'recall': 0.583253358925144, 'f1-score': 0.58311345646438, 'support': 4168.0} | {'precision': 0.8634197988353626, 'recall': 0.7578996282527881, 'f1-score': 0.8072259341747091, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8795297932711795, 'recall': 0.89861674811563, 'f1-score': 0.8889708292363159, 'support': 12073.0} | 0.8827 | {'precision': 0.831458700924199, 'recall': 0.8098982329747342, 'f1-score': 0.8197944028670079, 'support': 29705.0} | {'precision': 0.8825947337352275, 'recall': 0.8827133479212254, 'f1-score': 0.8823636375005335, 'support': 29705.0} |
76
+ | No log | 5.0 | 205 | 0.2621 | {'precision': 0.5981237322515213, 'recall': 0.565978886756238, 'f1-score': 0.5816074950690335, 'support': 4168.0} | {'precision': 0.8415746519443111, 'recall': 0.8145910780669146, 'f1-score': 0.8278630460448643, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8798415137058301, 'recall': 0.9012672906485546, 'f1-score': 0.8904255319148937, 'support': 12073.0} | 0.8855 | {'precision': 0.8298628720971998, 'recall': 0.8204151130192705, 'f1-score': 0.8249408661553546, 'support': 29705.0} | {'precision': 0.8832645976626653, 'recall': 0.8854738259552264, 'f1-score': 0.8842386364262106, 'support': 29705.0} |
77
 
78
 
79
  ### Framework versions
meta_data/README_s42_e6.md CHANGED
@@ -1,5 +1,4 @@
1
  ---
2
- license: apache-2.0
3
  base_model: allenai/longformer-base-4096
4
  tags:
5
  - generated_from_trainer
@@ -17,12 +16,12 @@ model-index:
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
- split: test
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
- value: 0.8949278982249309
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,14 +31,14 @@ should probably proofread and complete it, then remove this comment. -->
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 0.2702
36
- - Claim: {'precision': 0.6345128453708192, 'recall': 0.6157102539981185, 'f1-score': 0.6249701599427071, 'support': 4252.0}
37
- - Majorclaim: {'precision': 0.8918156161806209, 'recall': 0.8689275893675527, 'f1-score': 0.8802228412256268, 'support': 2182.0}
38
- - O: {'precision': 1.0, 'recall': 0.9992100412534012, 'f1-score': 0.9996048645563507, 'support': 11393.0}
39
- - Premise: {'precision': 0.8856428052618837, 'recall': 0.8995081967213114, 'f1-score': 0.892521654263755, 'support': 12200.0}
40
- - Accuracy: 0.8949
41
- - Macro avg: {'precision': 0.8529928167033309, 'recall': 0.8458390203350961, 'f1-score': 0.84932987999711, 'support': 30027.0}
42
- - Weighted avg: {'precision': 0.8939198893401877, 'recall': 0.8949278982249309, 'f1-score': 0.8943711247723389, 'support': 30027.0}
43
 
44
  ## Model description
45
 
@@ -68,14 +67,14 @@ The following hyperparameters were used during training:
68
 
69
  ### Training results
70
 
71
- | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
- |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
- | No log | 1.0 | 41 | 0.3401 | {'precision': 0.5311909262759924, 'recall': 0.3304327375352775, 'f1-score': 0.40742351747136435, 'support': 4252.0} | {'precision': 0.6367229608336328, 'recall': 0.8120989917506874, 'f1-score': 0.7137965760322256, 'support': 2182.0} | {'precision': 0.9957709251101322, 'recall': 0.9920126393399455, 'f1-score': 0.9938882293452932, 'support': 11393.0} | {'precision': 0.842705109819609, 'recall': 0.9151639344262295, 'f1-score': 0.8774411568234507, 'support': 12200.0} | 0.8540 | {'precision': 0.7515974805098417, 'recall': 0.7624270757630349, 'f1-score': 0.7481373699180834, 'support': 30027.0} | {'precision': 0.8417015955188155, 'recall': 0.8540313717654111, 'f1-score': 0.8431751302283804, 'support': 30027.0} |
74
- | No log | 2.0 | 82 | 0.2705 | {'precision': 0.5723863918549789, 'recall': 0.5420978363123237, 'f1-score': 0.5568305350887789, 'support': 4252.0} | {'precision': 0.8547959724430313, 'recall': 0.7392300641613199, 'f1-score': 0.7928237896289015, 'support': 2182.0} | {'precision': 0.9999120492524186, 'recall': 0.9978934433424033, 'f1-score': 0.9989017264859641, 'support': 11393.0} | {'precision': 0.8669073216667974, 'recall': 0.9054918032786885, 'f1-score': 0.8857795774365553, 'support': 12200.0} | 0.8770 | {'precision': 0.8235004338043066, 'recall': 0.7961782867736839, 'f1-score': 0.8085839071600499, 'support': 30027.0} | {'precision': 0.8747866603891763, 'recall': 0.8770106903786592, 'f1-score': 0.875364943509119, 'support': 30027.0} |
75
- | No log | 3.0 | 123 | 0.2601 | {'precision': 0.6572898799313893, 'recall': 0.4506114769520226, 'f1-score': 0.5346728059160039, 'support': 4252.0} | {'precision': 0.7966573816155988, 'recall': 0.9175068744271311, 'f1-score': 0.8528221512247072, 'support': 2182.0} | {'precision': 0.9997364490907493, 'recall': 0.9988589484771351, 'f1-score': 0.9992975061468212, 'support': 11393.0} | {'precision': 0.8583535108958837, 'recall': 0.9298360655737705, 'f1-score': 0.8926660371419579, 'support': 12200.0} | 0.8873 | {'precision': 0.8280093053834052, 'recall': 0.8242033413575147, 'f1-score': 0.8198646251073725, 'support': 30027.0} | {'precision': 0.8790426340817996, 'recall': 0.8872681253538482, 'f1-score': 0.8795353796712885, 'support': 30027.0} |
76
- | No log | 4.0 | 164 | 0.2546 | {'precision': 0.6344463971880492, 'recall': 0.5943085606773283, 'f1-score': 0.6137219186399513, 'support': 4252.0} | {'precision': 0.8699369936993699, 'recall': 0.885884509624198, 'f1-score': 0.8778383287920073, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9992978144474678, 'f1-score': 0.9996487839143033, 'support': 11393.0} | {'precision': 0.8826083460641634, 'recall': 0.8997540983606558, 'f1-score': 0.8910987539067257, 'support': 12200.0} | 0.8933 | {'precision': 0.8467479342378956, 'recall': 0.8448112457774125, 'f1-score': 0.845576946313247, 'support': 30027.0} | {'precision': 0.8910877018376263, 'recall': 0.8932627302094781, 'f1-score': 0.8920435682645267, 'support': 30027.0} |
77
- | No log | 5.0 | 205 | 0.2814 | {'precision': 0.6258696212316414, 'recall': 0.5712605832549389, 'f1-score': 0.5973195622771426, 'support': 4252.0} | {'precision': 0.868504292815183, 'recall': 0.8808432630614116, 'f1-score': 0.8746302616609783, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9992100412534012, 'f1-score': 0.9996048645563507, 'support': 11393.0} | {'precision': 0.8768826201290939, 'recall': 0.9019672131147541, 'f1-score': 0.8892480504262799, 'support': 12200.0} | 0.8905 | {'precision': 0.8428141335439796, 'recall': 0.8383202751711265, 'f1-score': 0.8402006847301878, 'support': 30027.0} | {'precision': 0.8874427003022152, 'recall': 0.8904985513038266, 'f1-score': 0.8887191676437134, 'support': 30027.0} |
78
- | No log | 6.0 | 246 | 0.2702 | {'precision': 0.6345128453708192, 'recall': 0.6157102539981185, 'f1-score': 0.6249701599427071, 'support': 4252.0} | {'precision': 0.8918156161806209, 'recall': 0.8689275893675527, 'f1-score': 0.8802228412256268, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9992100412534012, 'f1-score': 0.9996048645563507, 'support': 11393.0} | {'precision': 0.8856428052618837, 'recall': 0.8995081967213114, 'f1-score': 0.892521654263755, 'support': 12200.0} | 0.8949 | {'precision': 0.8529928167033309, 'recall': 0.8458390203350961, 'f1-score': 0.84932987999711, 'support': 30027.0} | {'precision': 0.8939198893401877, 'recall': 0.8949278982249309, 'f1-score': 0.8943711247723389, 'support': 30027.0} |
79
 
80
 
81
  ### Framework versions
 
1
  ---
 
2
  base_model: allenai/longformer-base-4096
3
  tags:
4
  - generated_from_trainer
 
16
  name: essays_su_g
17
  type: essays_su_g
18
  config: sep_tok
19
+ split: train[80%:100%]
20
  args: sep_tok
21
  metrics:
22
  - name: Accuracy
23
  type: accuracy
24
+ value: 0.8985356000673287
25
  ---
26
 
27
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
31
 
32
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
33
  It achieves the following results on the evaluation set:
34
+ - Loss: 0.2583
35
+ - Claim: {'precision': 0.6372016071850626, 'recall': 0.6468330134357005, 'f1-score': 0.6419811882366948, 'support': 4168.0}
36
+ - Majorclaim: {'precision': 0.892292490118577, 'recall': 0.8392193308550185, 'f1-score': 0.8649425287356322, 'support': 2152.0}
37
+ - O: {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0}
38
+ - Premise: {'precision': 0.8961370562556626, 'recall': 0.9011844611944008, 'f1-score': 0.8986536714297514, 'support': 12073.0}
39
+ - Accuracy: 0.8985
40
+ - Macro avg: {'precision': 0.8564077883898256, 'recall': 0.8467429000982954, 'f1-score': 0.8513611920675855, 'support': 29705.0}
41
+ - Weighted avg: {'precision': 0.8990786876841317, 'recall': 0.8985356000673287, 'f1-score': 0.8987402622673225, 'support': 29705.0}
42
 
43
  ## Model description
44
 
 
67
 
68
  ### Training results
69
 
70
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
71
+ |:-------------:|:-----:|:----:|:---------------:|:---------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
72
+ | No log | 1.0 | 41 | 0.3743 | {'precision': 0.43195876288659796, 'recall': 0.30158349328214973, 'f1-score': 0.35518508053122355, 'support': 4168.0} | {'precision': 0.6687344913151365, 'recall': 0.5009293680297398, 'f1-score': 0.5727948990435707, 'support': 2152.0} | {'precision': 0.9990991802540312, 'recall': 0.980463224893918, 'f1-score': 0.9896934814616517, 'support': 11312.0} | {'precision': 0.8109643516545946, 'recall': 0.9459123664375052, 'f1-score': 0.8732555916650737, 'support': 12073.0} | 0.8364 | {'precision': 0.72768919652759, 'recall': 0.6822221131608281, 'f1-score': 0.69773226317538, 'support': 29705.0} | {'precision': 0.8191248373533424, 'recall': 0.836424844302306, 'f1-score': 0.8231372987329588, 'support': 29705.0} |
73
+ | No log | 2.0 | 82 | 0.2730 | {'precision': 0.5999298737727911, 'recall': 0.41050863723608444, 'f1-score': 0.4874643874643875, 'support': 4168.0} | {'precision': 0.749693752552062, 'recall': 0.8531598513011153, 'f1-score': 0.7980873723103673, 'support': 2152.0} | {'precision': 0.9993805309734514, 'recall': 0.9983203677510608, 'f1-score': 0.9988501680523616, 'support': 11312.0} | {'precision': 0.8544719169719169, 'recall': 0.9274413981611861, 'f1-score': 0.8894626047583112, 'support': 12073.0} | 0.8765 | {'precision': 0.8008690185675553, 'recall': 0.7973575636123617, 'f1-score': 0.7934661331463568, 'support': 29705.0} | {'precision': 0.8663484493974304, 'recall': 0.8765191045278573, 'f1-score': 0.8680932745470084, 'support': 29705.0} |
74
+ | No log | 3.0 | 123 | 0.2507 | {'precision': 0.6004319654427646, 'recall': 0.6002879078694817, 'f1-score': 0.6003599280143971, 'support': 4168.0} | {'precision': 0.7873417721518987, 'recall': 0.8671003717472119, 'f1-score': 0.825298540468819, 'support': 2152.0} | {'precision': 0.9997347480106101, 'recall': 0.999557991513437, 'f1-score': 0.9996463619485456, 'support': 11312.0} | {'precision': 0.8981278461797942, 'recall': 0.8821336867390044, 'f1-score': 0.8900589193932557, 'support': 12073.0} | 0.8862 | {'precision': 0.8214090829462669, 'recall': 0.8372699894672837, 'f1-score': 0.8288409374562543, 'support': 29705.0} | {'precision': 0.8870243017021042, 'recall': 0.8862144420131292, 'f1-score': 0.8864508877040778, 'support': 29705.0} |
75
+ | No log | 4.0 | 164 | 0.2533 | {'precision': 0.6344515441959532, 'recall': 0.5717370441458733, 'f1-score': 0.6014639071176174, 'support': 4168.0} | {'precision': 0.9013713080168776, 'recall': 0.7941449814126395, 'f1-score': 0.8443675889328064, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8741170930780098, 'recall': 0.9225544603661062, 'f1-score': 0.8976828531130365, 'support': 12073.0} | 0.8935 | {'precision': 0.8524628839444942, 'recall': 0.8220649206324984, 'f1-score': 0.8358454351890219, 'support': 29705.0} | {'precision': 0.8903673007029912, 'recall': 0.8934522807608147, 'f1-score': 0.8911700264460231, 'support': 29705.0} |
76
+ | No log | 5.0 | 205 | 0.2560 | {'precision': 0.6261571326845479, 'recall': 0.6329174664107485, 'f1-score': 0.6295191504593723, 'support': 4168.0} | {'precision': 0.8881709741550696, 'recall': 0.8303903345724907, 'f1-score': 0.8583093179634967, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9990275813295615, 'f1-score': 0.9995135541502675, 'support': 11312.0} | {'precision': 0.8916988258477707, 'recall': 0.8995278721113228, 'f1-score': 0.8955962394854031, 'support': 12073.0} | 0.8950 | {'precision': 0.8515067331718471, 'recall': 0.8404658136060308, 'f1-score': 0.8457345655146349, 'support': 29705.0} | {'precision': 0.8954265877754937, 'recall': 0.8950008416091567, 'f1-score': 0.8951337550993843, 'support': 29705.0} |
77
+ | No log | 6.0 | 246 | 0.2583 | {'precision': 0.6372016071850626, 'recall': 0.6468330134357005, 'f1-score': 0.6419811882366948, 'support': 4168.0} | {'precision': 0.892292490118577, 'recall': 0.8392193308550185, 'f1-score': 0.8649425287356322, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0} | {'precision': 0.8961370562556626, 'recall': 0.9011844611944008, 'f1-score': 0.8986536714297514, 'support': 12073.0} | 0.8985 | {'precision': 0.8564077883898256, 'recall': 0.8467429000982954, 'f1-score': 0.8513611920675855, 'support': 29705.0} | {'precision': 0.8990786876841317, 'recall': 0.8985356000673287, 'f1-score': 0.8987402622673225, 'support': 29705.0} |
78
 
79
 
80
  ### Framework versions
meta_data/README_s42_e7.md CHANGED
@@ -17,12 +17,12 @@ model-index:
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
- split: test
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
- value: 0.8970593132847104
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,14 +32,14 @@ should probably proofread and complete it, then remove this comment. -->
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
- - Loss: 0.2727
36
- - Claim: {'precision': 0.6323296354992076, 'recall': 0.6568673565380997, 'f1-score': 0.6443649786595916, 'support': 4252.0}
37
- - Majorclaim: {'precision': 0.8782371649250341, 'recall': 0.885884509624198, 'f1-score': 0.8820442619210586, 'support': 2182.0}
38
- - O: {'precision': 1.0, 'recall': 0.9996489072237339, 'f1-score': 0.9998244227899218, 'support': 11393.0}
39
- - Premise: {'precision': 0.9002495840266223, 'recall': 0.8869672131147541, 'f1-score': 0.8935590421139553, 'support': 12200.0}
40
- - Accuracy: 0.8971
41
- - Macro avg: {'precision': 0.852704096112716, 'recall': 0.8573419966251964, 'f1-score': 0.8549481763711319, 'support': 30027.0}
42
- - Weighted avg: {'precision': 0.8985587647495203, 'recall': 0.8970593132847104, 'f1-score': 0.897754701815305, 'support': 30027.0}
43
 
44
  ## Model description
45
 
@@ -70,13 +70,13 @@ The following hyperparameters were used during training:
70
 
71
  | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
  |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
- | No log | 1.0 | 41 | 0.3545 | {'precision': 0.5265911072362686, 'recall': 0.28410159924741296, 'f1-score': 0.3690803544149099, 'support': 4252.0} | {'precision': 0.5903767014878126, 'recall': 0.8547204399633364, 'f1-score': 0.6983710915558883, 'support': 2182.0} | {'precision': 0.9956778689247596, 'recall': 0.9907838146230141, 'f1-score': 0.9932248130224374, 'support': 11393.0} | {'precision': 0.8420336934350684, 'recall': 0.9136065573770492, 'f1-score': 0.8763612061170736, 'support': 12200.0} | 0.8495 | {'precision': 0.7386698427709772, 'recall': 0.7608031028027031, 'f1-score': 0.7342593662775774, 'support': 30027.0} | {'precision': 0.837374242221422, 'recall': 0.8494688114030706, 'f1-score': 0.8359340726059904, 'support': 30027.0} |
74
- | No log | 2.0 | 82 | 0.2887 | {'precision': 0.5331588132635253, 'recall': 0.5747883349012229, 'f1-score': 0.5531914893617021, 'support': 4252.0} | {'precision': 0.9024745269286754, 'recall': 0.5682859761686526, 'f1-score': 0.6974128233970753, 'support': 2182.0} | {'precision': 0.9994723419224343, 'recall': 0.997542350566137, 'f1-score': 0.9985064136355649, 'support': 11393.0} | {'precision': 0.8662781540400063, 'recall': 0.9016393442622951, 'f1-score': 0.8836051088440838, 'support': 12200.0} | 0.8675 | {'precision': 0.8253459590386604, 'recall': 0.7605640014745769, 'f1-score': 0.7831789588096065, 'support': 30027.0} | {'precision': 0.8722740387839361, 'recall': 0.8675192326905785, 'f1-score': 0.8668828351772135, 'support': 30027.0} |
75
- | No log | 3.0 | 123 | 0.2610 | {'precision': 0.6448462929475588, 'recall': 0.4193320790216369, 'f1-score': 0.5081943850648425, 'support': 4252.0} | {'precision': 0.8409090909090909, 'recall': 0.847846012832264, 'f1-score': 0.8443633044272022, 'support': 2182.0} | {'precision': 0.9999121959785758, 'recall': 0.9995611340296673, 'f1-score': 0.9997366341848828, 'support': 11393.0} | {'precision': 0.8441453960359834, 'recall': 0.9460655737704918, 'f1-score': 0.8922042283461523, 'support': 12200.0} | 0.8846 | {'precision': 0.8324532439678022, 'recall': 0.803201199913515, 'f1-score': 0.81112463800577, 'support': 30027.0} | {'precision': 0.8747901406867009, 'recall': 0.8846371598894328, 'f1-score': 0.8751501753304457, 'support': 30027.0} |
76
- | No log | 4.0 | 164 | 0.2530 | {'precision': 0.6281010374379793, 'recall': 0.6549858889934148, 'f1-score': 0.6412618005986644, 'support': 4252.0} | {'precision': 0.8315485996705108, 'recall': 0.9252978918423465, 'f1-score': 0.8759219088937094, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9996489072237339, 'f1-score': 0.9998244227899218, 'support': 11393.0} | {'precision': 0.9083729619565217, 'recall': 0.8768032786885246, 'f1-score': 0.8923089756423088, 'support': 12200.0} | 0.8955 | {'precision': 0.8420056497662529, 'recall': 0.8641839916870049, 'f1-score': 0.8523292769811512, 'support': 30027.0} | {'precision': 0.8978677454136914, 'recall': 0.8955273587104939, 'f1-score': 0.896362471543389, 'support': 30027.0} |
77
- | No log | 5.0 | 205 | 0.2707 | {'precision': 0.6235240690281563, 'recall': 0.6458137347130762, 'f1-score': 0.6344731977818855, 'support': 4252.0} | {'precision': 0.873358348968105, 'recall': 0.8533455545371219, 'f1-score': 0.8632359758924432, 'support': 2182.0} | {'precision': 1.0, 'recall': 1.0, 'f1-score': 1.0, 'support': 11393.0} | {'precision': 0.8975037196230782, 'recall': 0.89, 'f1-score': 0.8937361099678987, 'support': 12200.0} | 0.8945 | {'precision': 0.848596534404835, 'recall': 0.8472898223125496, 'f1-score': 0.8478613209105568, 'support': 30027.0} | {'precision': 0.8958416637811863, 'recall': 0.8944949545409132, 'f1-score': 0.8951257694066757, 'support': 30027.0} |
78
- | No log | 6.0 | 246 | 0.2700 | {'precision': 0.631960692559663, 'recall': 0.6352304797742239, 'f1-score': 0.6335913675815154, 'support': 4252.0} | {'precision': 0.885956644674835, 'recall': 0.8615948670944088, 'f1-score': 0.8736059479553903, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9995611340296673, 'f1-score': 0.9997805188534304, 'support': 11393.0} | {'precision': 0.8923466470636282, 'recall': 0.8954918032786885, 'f1-score': 0.8939164586998323, 'support': 12200.0} | 0.8957 | {'precision': 0.8525659960745315, 'recall': 0.8479695710442472, 'f1-score': 0.850223573272542, 'support': 30027.0} | {'precision': 0.8958565077303907, 'recall': 0.8956605721517301, 'f1-score': 0.8957444606797332, 'support': 30027.0} |
79
- | No log | 7.0 | 287 | 0.2727 | {'precision': 0.6323296354992076, 'recall': 0.6568673565380997, 'f1-score': 0.6443649786595916, 'support': 4252.0} | {'precision': 0.8782371649250341, 'recall': 0.885884509624198, 'f1-score': 0.8820442619210586, 'support': 2182.0} | {'precision': 1.0, 'recall': 0.9996489072237339, 'f1-score': 0.9998244227899218, 'support': 11393.0} | {'precision': 0.9002495840266223, 'recall': 0.8869672131147541, 'f1-score': 0.8935590421139553, 'support': 12200.0} | 0.8971 | {'precision': 0.852704096112716, 'recall': 0.8573419966251964, 'f1-score': 0.8549481763711319, 'support': 30027.0} | {'precision': 0.8985587647495203, 'recall': 0.8970593132847104, 'f1-score': 0.897754701815305, 'support': 30027.0} |
80
 
81
 
82
  ### Framework versions
 
17
  name: essays_su_g
18
  type: essays_su_g
19
  config: sep_tok
20
+ split: train[80%:100%]
21
  args: sep_tok
22
  metrics:
23
  - name: Accuracy
24
  type: accuracy
25
+ value: 0.8963474162598889
26
  ---
27
 
28
  <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 
32
 
33
  This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
  It achieves the following results on the evaluation set:
35
+ - Loss: 0.2715
36
+ - Claim: {'precision': 0.6254587155963303, 'recall': 0.6542706333973128, 'f1-score': 0.6395403377110694, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.8875878220140515, 'recall': 0.8805762081784386, 'f1-score': 0.8840681128994634, 'support': 2152.0}
38
+ - O: {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8987139615028998, 'recall': 0.8856125238134681, 'f1-score': 0.8921151439299123, 'support': 12073.0}
40
+ - Accuracy: 0.8963
41
+ - Macro avg: {'precision': 0.8529180263075345, 'recall': 0.8551148413473049, 'f1-score': 0.8539198489113544, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8981038432990452, 'recall': 0.8963474162598889, 'f1-score': 0.8971595644270212, 'support': 29705.0}
43
 
44
  ## Model description
45
 
 
70
 
71
  | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
  |:-------------:|:-----:|:----:|:---------------:|:-------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3907 | {'precision': 0.4622047244094488, 'recall': 0.28166986564299423, 'f1-score': 0.3500298151460942, 'support': 4168.0} | {'precision': 0.6884593519044911, 'recall': 0.5627323420074349, 'f1-score': 0.6192789567885452, 'support': 2152.0} | {'precision': 0.9988159213043082, 'recall': 0.9694130127298444, 'f1-score': 0.9838948454533218, 'support': 11312.0} | {'precision': 0.8006515561100714, 'recall': 0.9567630249316657, 'f1-score': 0.8717735849056604, 'support': 12073.0} | 0.8383 | {'precision': 0.7375328884320799, 'recall': 0.6926445613279848, 'f1-score': 0.7062443005734054, 'support': 29705.0} | {'precision': 0.8204984263709232, 'recall': 0.838310048813331, 'f1-score': 0.8229710003996594, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2736 | {'precision': 0.6199246546672248, 'recall': 0.3553262955854127, 'f1-score': 0.45173097453103556, 'support': 4168.0} | {'precision': 0.7746005046257359, 'recall': 0.8559479553903345, 'f1-score': 0.8132450331125828, 'support': 2152.0} | {'precision': 0.9999114103472715, 'recall': 0.9977899575671852, 'f1-score': 0.9988495575221238, 'support': 11312.0} | {'precision': 0.8387545787545787, 'recall': 0.9483144206079682, 'f1-score': 0.8901761069859658, 'support': 12073.0} | 0.8773 | {'precision': 0.8082977870987028, 'recall': 0.7893446572877252, 'f1-score': 0.7885004180379269, 'support': 29705.0} | {'precision': 0.8647725349186985, 'recall': 0.87725972058576, 'f1-score': 0.8644672730999988, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2407 | {'precision': 0.6129186602870813, 'recall': 0.6146833013435701, 'f1-score': 0.6137997125059894, 'support': 4168.0} | {'precision': 0.7996618765849535, 'recall': 0.879182156133829, 'f1-score': 0.8375387339530764, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.9024307900067522, 'recall': 0.8856125238134681, 'f1-score': 0.8939425609297271, 'support': 12073.0} | 0.8906 | {'precision': 0.8287307293414808, 'recall': 0.8448252944740605, 'f1-score': 0.836287099745355, 'support': 29705.0} | {'precision': 0.8914850757054159, 'recall': 0.890624473994277, 'f1-score': 0.8908860134318254, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2498 | {'precision': 0.6335050149091895, 'recall': 0.560700575815739, 'f1-score': 0.5948835433371515, 'support': 4168.0} | {'precision': 0.8946840521564694, 'recall': 0.828996282527881, 'f1-score': 0.8605885190545105, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.872137855063341, 'recall': 0.9180816698417957, 'f1-score': 0.894520216286014, 'support': 12073.0} | 0.8927 | {'precision': 0.8500596320614642, 'recall': 0.8269446320463539, 'f1-score': 0.8374870199456621, 'support': 29705.0} | {'precision': 0.8889456116800479, 'recall': 0.8926780003366437, 'f1-score': 0.890170129437975, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2543 | {'precision': 0.6193029490616622, 'recall': 0.6650671785028791, 'f1-score': 0.6413697362332255, 'support': 4168.0} | {'precision': 0.8613728129205922, 'recall': 0.8921933085501859, 'f1-score': 0.8765122118237845, 'support': 2152.0} | {'precision': 0.9999115592111082, 'recall': 0.9994695898161244, 'f1-score': 0.9996905256642645, 'support': 11312.0} | {'precision': 0.9060976652698195, 'recall': 0.8775780667605401, 'f1-score': 0.8916098628292518, 'support': 12073.0} | 0.8952 | {'precision': 0.8466712466157955, 'recall': 0.8585770359074323, 'f1-score': 0.8522955841376315, 'support': 29705.0} | {'precision': 0.8983418837129342, 'recall': 0.8952364921730348, 'f1-score': 0.8965624790680554, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2768 | {'precision': 0.6175036567528035, 'recall': 0.607725527831094, 'f1-score': 0.6125755743651752, 'support': 4168.0} | {'precision': 0.9085303186022611, 'recall': 0.8215613382899628, 'f1-score': 0.8628599316739872, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8816429034348672, 'recall': 0.9014329495568624, 'f1-score': 0.8914281033706025, 'support': 12073.0} | 0.8920 | {'precision': 0.8518971212266971, 'recall': 0.8326799539194798, 'f1-score': 0.8417048526286843, 'support': 29705.0} | {'precision': 0.8915666503464329, 'recall': 0.8919710486450093, 'f1-score': 0.8915603797680256, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.2715 | {'precision': 0.6254587155963303, 'recall': 0.6542706333973128, 'f1-score': 0.6395403377110694, 'support': 4168.0} | {'precision': 0.8875878220140515, 'recall': 0.8805762081784386, 'f1-score': 0.8840681128994634, 'support': 2152.0} | {'precision': 0.9999116061168567, 'recall': 1.0, 'f1-score': 0.9999558011049724, 'support': 11312.0} | {'precision': 0.8987139615028998, 'recall': 0.8856125238134681, 'f1-score': 0.8921151439299123, 'support': 12073.0} | 0.8963 | {'precision': 0.8529180263075345, 'recall': 0.8551148413473049, 'f1-score': 0.8539198489113544, 'support': 29705.0} | {'precision': 0.8981038432990452, 'recall': 0.8963474162598889, 'f1-score': 0.8971595644270212, 'support': 29705.0} |
80
 
81
 
82
  ### Framework versions
meta_data/README_s42_e8.md ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8973236828816697
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.2957
36
+ - Claim: {'precision': 0.6376988984088128, 'recall': 0.625, 'f1-score': 0.6312855931176542, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.9108095007270964, 'recall': 0.8731412639405205, 'f1-score': 0.8915776986951364, 'support': 2152.0}
38
+ - O: {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8869018455005716, 'recall': 0.8996107015654767, 'f1-score': 0.8932110695341092, 'support': 12073.0}
40
+ - Accuracy: 0.8973
41
+ - Macro avg: {'precision': 0.8588304587809044, 'recall': 0.849393790527843, 'f1-score': 0.8539854382348816, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.896702809171908, 'recall': 0.8973236828816697, 'f1-score': 0.8969571027061913, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 8
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3532 | {'precision': 0.46718648473034435, 'recall': 0.3450095969289827, 'f1-score': 0.39690863924924097, 'support': 4168.0} | {'precision': 0.72375, 'recall': 0.5381040892193308, 'f1-score': 0.6172707889125799, 'support': 2152.0} | {'precision': 0.999551489056333, 'recall': 0.9850601131541725, 'f1-score': 0.9922528940338379, 'support': 11312.0} | {'precision': 0.8174220044671806, 'recall': 0.9397001573759629, 'f1-score': 0.8743064118372379, 'support': 12073.0} | 0.8444 | {'precision': 0.7519774945634644, 'recall': 0.7019684891696123, 'f1-score': 0.7201846835082242, 'support': 29705.0} | {'precision': 0.8308502128427397, 'recall': 0.8444369634741626, 'f1-score': 0.8336154853914309, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2649 | {'precision': 0.6305418719211823, 'recall': 0.39923224568138194, 'f1-score': 0.48890847656823855, 'support': 4168.0} | {'precision': 0.7800252844500632, 'recall': 0.8601301115241635, 'f1-score': 0.818121546961326, 'support': 2152.0} | {'precision': 0.9998229775181448, 'recall': 0.9985855728429985, 'f1-score': 0.999203892083149, 'support': 11312.0} | {'precision': 0.8480029861888765, 'recall': 0.9408597697341174, 'f1-score': 0.8920213601382126, 'support': 12073.0} | 0.8810 | {'precision': 0.8145982800195667, 'recall': 0.7997019249456654, 'f1-score': 0.7995638189377315, 'support': 29705.0} | {'precision': 0.8703804244486646, 'recall': 0.8809964652415418, 'f1-score': 0.8709219460558968, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2493 | {'precision': 0.6082398829839103, 'recall': 0.5986084452975048, 'f1-score': 0.6033857315598549, 'support': 4168.0} | {'precision': 0.7881733495342244, 'recall': 0.904275092936803, 'f1-score': 0.8422419389742479, 'support': 2152.0} | {'precision': 0.9998230088495575, 'recall': 0.9987623762376238, 'f1-score': 0.9992924111091456, 'support': 11312.0} | {'precision': 0.8988507689707622, 'recall': 0.8810569038350037, 'f1-score': 0.8898648931275358, 'support': 12073.0} | 0.8879 | {'precision': 0.8237717525846135, 'recall': 0.8456757045767338, 'f1-score': 0.833696243692696, 'support': 29705.0} | {'precision': 0.8885075270279347, 'recall': 0.8879313246928127, 'f1-score': 0.8878892775966825, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2464 | {'precision': 0.6338532344668882, 'recall': 0.5947696737044146, 'f1-score': 0.6136898130956802, 'support': 4168.0} | {'precision': 0.8428571428571429, 'recall': 0.9047397769516728, 'f1-score': 0.8727028238458091, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8904132095621458, 'recall': 0.897788453574091, 'f1-score': 0.8940856223707004, 'support': 12073.0} | 0.8946 | {'precision': 0.8417587943433282, 'recall': 0.8492802752088883, 'f1-score': 0.8450864127262041, 'support': 29705.0} | {'precision': 0.8926674783141166, 'recall': 0.8946305335802054, 'f1-score': 0.8934762252306426, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2565 | {'precision': 0.6154180238870792, 'recall': 0.6799424184261037, 'f1-score': 0.6460731790721531, 'support': 4168.0} | {'precision': 0.9039196940726577, 'recall': 0.8787174721189591, 'f1-score': 0.8911404335532517, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9993811881188119, 'f1-score': 0.9996904982977407, 'support': 11312.0} | {'precision': 0.9022472870204221, 'recall': 0.8745962064109998, 'f1-score': 0.8882065948855989, 'support': 12073.0} | 0.8951 | {'precision': 0.8553962512450398, 'recall': 0.8581593212687186, 'f1-score': 0.8562776764521861, 'support': 29705.0} | {'precision': 0.8993478876082566, 'recall': 0.8951018347079617, 'f1-score': 0.8968989853619663, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2794 | {'precision': 0.6114372689715155, 'recall': 0.6746641074856046, 'f1-score': 0.6414965210448272, 'support': 4168.0} | {'precision': 0.9110212335692619, 'recall': 0.837360594795539, 'f1-score': 0.872639225181598, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8996361174578996, 'recall': 0.8805599271100804, 'f1-score': 0.8899958141481791, 'support': 12073.0} | 0.8940 | {'precision': 0.8555015526214533, 'recall': 0.8481019564991497, 'f1-score': 0.8509997379918077, 'support': 29705.0} | {'precision': 0.8982088870349785, 'recall': 0.8939572462548393, 'f1-score': 0.8957110422559299, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.2945 | {'precision': 0.6273562107298212, 'recall': 0.6228406909788867, 'f1-score': 0.6250902961714422, 'support': 4168.0} | {'precision': 0.9130213631739573, 'recall': 0.8341078066914498, 'f1-score': 0.87178241864983, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0} | {'precision': 0.8854539537910836, 'recall': 0.9015157790110163, 'f1-score': 0.8934126821260003, 'support': 12073.0} | 0.8949 | {'precision': 0.8564578819237155, 'recall': 0.8395497678973538, 'f1-score': 0.8475381942038841, 'support': 29705.0} | {'precision': 0.8948570356502946, 'recall': 0.8949335128766202, 'f1-score': 0.8947356751979132, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.2957 | {'precision': 0.6376988984088128, 'recall': 0.625, 'f1-score': 0.6312855931176542, 'support': 4168.0} | {'precision': 0.9108095007270964, 'recall': 0.8731412639405205, 'f1-score': 0.8915776986951364, 'support': 2152.0} | {'precision': 0.9999115904871364, 'recall': 0.9998231966053748, 'f1-score': 0.9998673915926268, 'support': 11312.0} | {'precision': 0.8869018455005716, 'recall': 0.8996107015654767, 'f1-score': 0.8932110695341092, 'support': 12073.0} | 0.8973 | {'precision': 0.8588304587809044, 'recall': 0.849393790527843, 'f1-score': 0.8539854382348816, 'support': 29705.0} | {'precision': 0.896702809171908, 'recall': 0.8973236828816697, 'f1-score': 0.8969571027061913, 'support': 29705.0} |
81
+
82
+
83
+ ### Framework versions
84
+
85
+ - Transformers 4.37.2
86
+ - Pytorch 2.2.0+cu121
87
+ - Datasets 2.17.0
88
+ - Tokenizers 0.15.2
meta_data/README_s42_e9.md ADDED
@@ -0,0 +1,89 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ base_model: allenai/longformer-base-4096
4
+ tags:
5
+ - generated_from_trainer
6
+ datasets:
7
+ - essays_su_g
8
+ metrics:
9
+ - accuracy
10
+ model-index:
11
+ - name: longformer-sep_tok
12
+ results:
13
+ - task:
14
+ name: Token Classification
15
+ type: token-classification
16
+ dataset:
17
+ name: essays_su_g
18
+ type: essays_su_g
19
+ config: sep_tok
20
+ split: train[80%:100%]
21
+ args: sep_tok
22
+ metrics:
23
+ - name: Accuracy
24
+ type: accuracy
25
+ value: 0.8961117656960108
26
+ ---
27
+
28
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
29
+ should probably proofread and complete it, then remove this comment. -->
30
+
31
+ # longformer-sep_tok
32
+
33
+ This model is a fine-tuned version of [allenai/longformer-base-4096](https://huggingface.co/allenai/longformer-base-4096) on the essays_su_g dataset.
34
+ It achieves the following results on the evaluation set:
35
+ - Loss: 0.3184
36
+ - Claim: {'precision': 0.6251433815095205, 'recall': 0.6537907869481766, 'f1-score': 0.6391462413510027, 'support': 4168.0}
37
+ - Majorclaim: {'precision': 0.8963831867057673, 'recall': 0.8522304832713755, 'f1-score': 0.873749404478323, 'support': 2152.0}
38
+ - O: {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0}
39
+ - Premise: {'precision': 0.8966063537063287, 'recall': 0.8906651205168558, 'f1-score': 0.8936258622122497, 'support': 12073.0}
40
+ - Accuracy: 0.8961
41
+ - Macro avg: {'precision': 0.8545332304804041, 'recall': 0.8490610955624612, 'f1-score': 0.8515751137361555, 'support': 29705.0}
42
+ - Weighted avg: {'precision': 0.8978738508742299, 'recall': 0.8961117656960108, 'f1-score': 0.8969033743223053, 'support': 29705.0}
43
+
44
+ ## Model description
45
+
46
+ More information needed
47
+
48
+ ## Intended uses & limitations
49
+
50
+ More information needed
51
+
52
+ ## Training and evaluation data
53
+
54
+ More information needed
55
+
56
+ ## Training procedure
57
+
58
+ ### Training hyperparameters
59
+
60
+ The following hyperparameters were used during training:
61
+ - learning_rate: 2e-05
62
+ - train_batch_size: 8
63
+ - eval_batch_size: 8
64
+ - seed: 42
65
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
66
+ - lr_scheduler_type: linear
67
+ - num_epochs: 9
68
+
69
+ ### Training results
70
+
71
+ | Training Loss | Epoch | Step | Validation Loss | Claim | Majorclaim | O | Premise | Accuracy | Macro avg | Weighted avg |
72
+ |:-------------:|:-----:|:----:|:---------------:|:--------------------------------------------------------------------------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|:--------:|:-------------------------------------------------------------------------------------------------------------------:|:-------------------------------------------------------------------------------------------------------------------:|
73
+ | No log | 1.0 | 41 | 0.3777 | {'precision': 0.44112006137322596, 'recall': 0.2759117082533589, 'f1-score': 0.33948339483394835, 'support': 4168.0} | {'precision': 0.7229235880398671, 'recall': 0.5055762081784386, 'f1-score': 0.5950232430954334, 'support': 2152.0} | {'precision': 0.9991005576542543, 'recall': 0.981966053748232, 'f1-score': 0.9904592064199733, 'support': 11312.0} | {'precision': 0.7974438687392055, 'recall': 0.9561003892984345, 'f1-score': 0.8695946963989755, 'support': 12073.0} | 0.8379 | {'precision': 0.7401470189516381, 'recall': 0.6798885898696161, 'f1-score': 0.6986401351870827, 'support': 29705.0} | {'precision': 0.8188414513630281, 'recall': 0.8378724120518432, 'f1-score': 0.8213481946290806, 'support': 29705.0} |
74
+ | No log | 2.0 | 82 | 0.2665 | {'precision': 0.6216867469879518, 'recall': 0.3714011516314779, 'f1-score': 0.4650045058576149, 'support': 4168.0} | {'precision': 0.7490087232355274, 'recall': 0.8777881040892194, 'f1-score': 0.808301240907146, 'support': 2152.0} | {'precision': 0.9999115044247787, 'recall': 0.9988507779349364, 'f1-score': 0.9993808597205024, 'support': 11312.0} | {'precision': 0.8468603001567984, 'recall': 0.9394516690135012, 'f1-score': 0.8907563025210085, 'support': 12073.0} | 0.8779 | {'precision': 0.8043668187012641, 'recall': 0.7968729256672837, 'f1-score': 0.7908607272515679, 'support': 29705.0} | {'precision': 0.8664602079008504, 'recall': 0.8778993435448578, 'f1-score': 0.8664097012738992, 'support': 29705.0} |
75
+ | No log | 3.0 | 123 | 0.2423 | {'precision': 0.6248701973001038, 'recall': 0.5774952015355086, 'f1-score': 0.6002493765586034, 'support': 4168.0} | {'precision': 0.8225806451612904, 'recall': 0.8531598513011153, 'f1-score': 0.8375912408759125, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.886398700771417, 'recall': 0.904166321543941, 'f1-score': 0.895194357880925, 'support': 12073.0} | 0.8909 | {'precision': 0.8334623858082029, 'recall': 0.8335727410491722, 'f1-score': 0.8331924249677761, 'support': 29705.0} | {'precision': 0.8883401462766283, 'recall': 0.8909274532906918, 'f1-score': 0.8894467745743578, 'support': 29705.0} |
76
+ | No log | 4.0 | 164 | 0.2482 | {'precision': 0.6370126304228446, 'recall': 0.5566218809980806, 'f1-score': 0.5941101152368758, 'support': 4168.0} | {'precision': 0.85006753714543, 'recall': 0.8773234200743495, 'f1-score': 0.8634804482048937, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0} | {'precision': 0.8778424958110588, 'recall': 0.9112896546011762, 'f1-score': 0.8942534341217588, 'support': 12073.0} | 0.8927 | {'precision': 0.8412306658448333, 'recall': 0.8362424376454172, 'f1-score': 0.8379278443579481, 'support': 29705.0} | {'precision': 0.8885576985512976, 'recall': 0.8927453290691802, 'f1-score': 0.890129015184852, 'support': 29705.0} |
77
+ | No log | 5.0 | 205 | 0.2617 | {'precision': 0.6225581395348837, 'recall': 0.642274472168906, 'f1-score': 0.6322626358053851, 'support': 4168.0} | {'precision': 0.8676403468735737, 'recall': 0.883364312267658, 'f1-score': 0.8754317292194335, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9994695898161244, 'f1-score': 0.9997347245556637, 'support': 11312.0} | {'precision': 0.8983036614040981, 'recall': 0.8860266710842376, 'f1-score': 0.8921229306534341, 'support': 12073.0} | 0.8948 | {'precision': 0.8471255369531389, 'recall': 0.8527837613342315, 'f1-score': 0.8498880050584791, 'support': 29705.0} | {'precision': 0.8961186485839084, 'recall': 0.8948325197778152, 'f1-score': 0.8954317149728881, 'support': 29705.0} |
78
+ | No log | 6.0 | 246 | 0.2822 | {'precision': 0.5918902562033488, 'recall': 0.7039347408829175, 'f1-score': 0.6430684931506849, 'support': 4168.0} | {'precision': 0.9098445595854923, 'recall': 0.8159851301115242, 'f1-score': 0.8603625673689368, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9996463932107497, 'f1-score': 0.9998231653404068, 'support': 11312.0} | {'precision': 0.9072111207645526, 'recall': 0.8649051602749938, 'f1-score': 0.8855531526947378, 'support': 12073.0} | 0.8901 | {'precision': 0.8522364841383483, 'recall': 0.8461178561200462, 'f1-score': 0.8472018446386915, 'support': 29705.0} | {'precision': 0.8984933156395887, 'recall': 0.8900858441339842, 'f1-score': 0.8932197469531816, 'support': 29705.0} |
79
+ | No log | 7.0 | 287 | 0.3260 | {'precision': 0.6209569633787757, 'recall': 0.557341650671785, 'f1-score': 0.5874320394487292, 'support': 4168.0} | {'precision': 0.9243511871893981, 'recall': 0.7778810408921933, 'f1-score': 0.8448145344436034, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9997347949080623, 'f1-score': 0.9998673798682641, 'support': 11312.0} | {'precision': 0.8664746184989099, 'recall': 0.9218089952787211, 'f1-score': 0.893285708552394, 'support': 12073.0} | 0.8899 | {'precision': 0.852945692266771, 'recall': 0.8141916204376904, 'f1-score': 0.8313499155782478, 'support': 29705.0} | {'precision': 0.8870661655388541, 'recall': 0.8899175223026426, 'f1-score': 0.8874464157201748, 'support': 29705.0} |
80
+ | No log | 8.0 | 328 | 0.3097 | {'precision': 0.6349094330745707, 'recall': 0.647552783109405, 'f1-score': 0.6411687848913172, 'support': 4168.0} | {'precision': 0.896469465648855, 'recall': 0.8731412639405205, 'f1-score': 0.8846516007532956, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.9996463932107497, 'f1-score': 0.9998231653404068, 'support': 11312.0} | {'precision': 0.8954356846473029, 'recall': 0.89372981032055, 'f1-score': 0.8945819342536168, 'support': 12073.0} | 0.8980 | {'precision': 0.8567036458426821, 'recall': 0.8535175626453062, 'f1-score': 0.8550563713096592, 'support': 29705.0} | {'precision': 0.8987746112734568, 'recall': 0.8980306345733041, 'f1-score': 0.8983823961899579, 'support': 29705.0} |
81
+ | No log | 9.0 | 369 | 0.3184 | {'precision': 0.6251433815095205, 'recall': 0.6537907869481766, 'f1-score': 0.6391462413510027, 'support': 4168.0} | {'precision': 0.8963831867057673, 'recall': 0.8522304832713755, 'f1-score': 0.873749404478323, 'support': 2152.0} | {'precision': 1.0, 'recall': 0.999557991513437, 'f1-score': 0.9997789469030461, 'support': 11312.0} | {'precision': 0.8966063537063287, 'recall': 0.8906651205168558, 'f1-score': 0.8936258622122497, 'support': 12073.0} | 0.8961 | {'precision': 0.8545332304804041, 'recall': 0.8490610955624612, 'f1-score': 0.8515751137361555, 'support': 29705.0} | {'precision': 0.8978738508742299, 'recall': 0.8961117656960108, 'f1-score': 0.8969033743223053, 'support': 29705.0} |
82
+
83
+
84
+ ### Framework versions
85
+
86
+ - Transformers 4.37.2
87
+ - Pytorch 2.2.0+cu121
88
+ - Datasets 2.17.0
89
+ - Tokenizers 0.15.2
meta_data/meta_s42_e10_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6712258064516129, "recall": 0.6102768653214453, "f1-score": 0.6393019540371144, "support": 4262.0}, "MajorClaim": {"precision": 0.910411622276029, "recall": 0.8683602771362586, "f1-score": 0.8888888888888888, "support": 2165.0}, "O": {"precision": 0.9981060606060606, "recall": 0.9989286302950388, "f1-score": 0.9985171760441552, "support": 12134.0}, "Premise": {"precision": 0.8886504883101509, "recall": 0.9211595981286909, "f1-score": 0.9046130672189794, "support": 13039.0}, "accuracy": 0.9054746835443038, "macro avg": {"precision": 0.8670984944109634, "recall": 0.8496813427203583, "f1-score": 0.8578302715472845, "support": 31600.0}, "weighted avg": {"precision": 0.9028461457529865, "recall": 0.9054746835443038, "f1-score": 0.9038103344980595, "support": 31600.0}}
meta_data/meta_s42_e10_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6557759626604434, "recall": 0.645383555351401, "f1-score": 0.6505382567426785, "support": 4354.0}, "MajorClaim": {"precision": 0.9040860215053763, "recall": 0.8948488718603661, "f1-score": 0.899443731279418, "support": 2349.0}, "O": {"precision": 0.9999202233745512, "recall": 0.9998404594767071, "f1-score": 0.9998803398348689, "support": 12536.0}, "Premise": {"precision": 0.8994653994653995, "recall": 0.9057873485868102, "f1-score": 0.9026153043737426, "support": 13374.0}, "accuracy": 0.9063870235795541, "macro avg": {"precision": 0.8648119017514426, "recall": 0.8614650588188211, "f1-score": 0.863119408057677, "support": 32613.0}, "weighted avg": {"precision": 0.9058779253246658, "recall": 0.9063870235795541, "f1-score": 0.9061206854781015, "support": 32613.0}}
meta_data/meta_s42_e10_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.7019810508182601, "recall": 0.7153829273644942, "f1-score": 0.7086186284099554, "support": 4557.0}, "MajorClaim": {"precision": 0.9089703932832524, "recall": 0.9065667695019832, "f1-score": 0.9077669902912623, "support": 2269.0}, "O": {"precision": 0.9999095104515429, "recall": 0.9981031523800922, "f1-score": 0.9990055148720731, "support": 11071.0}, "Premise": {"precision": 0.9214399226145236, "recall": 0.9175725884133755, "f1-score": 0.9195021891267625, "support": 14534.0}, "accuracy": 0.9158829514970245, "macro avg": {"precision": 0.8830752192918947, "recall": 0.8844063594149862, "f1-score": 0.8837233306750134, "support": 32431.0}, "weighted avg": {"precision": 0.916517760674263, "recall": 0.9158829514970245, "f1-score": 0.9161892406201513, "support": 32431.0}}
meta_data/meta_s42_e10_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6933191940615059, "recall": 0.6617408906882591, "f1-score": 0.6771620921802176, "support": 4940.0}, "MajorClaim": {"precision": 0.8809114359415305, "recall": 0.9364716636197441, "f1-score": 0.9078422684980062, "support": 2188.0}, "O": {"precision": 0.9995483628152051, "recall": 0.9944581741930653, "f1-score": 0.996996771529394, "support": 13353.0}, "Premise": {"precision": 0.9079980067272954, "recall": 0.9168501163595195, "f1-score": 0.9124025913059807, "support": 15899.0}, "accuracy": 0.911874656404618, "macro avg": {"precision": 0.8704442498863842, "recall": 0.877380211215147, "f1-score": 0.8736009308783996, "support": 36380.0}, "weighted avg": {"precision": 0.9108207981894616, "recall": 0.911874656404618, "f1-score": 0.9112349177638729, "support": 36380.0}}
meta_data/meta_s42_e10_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6261209473442171, "recall": 0.6533109404990403, "f1-score": 0.6394270282963485, "support": 4168.0}, "MajorClaim": {"precision": 0.9034416826003824, "recall": 0.8782527881040892, "f1-score": 0.8906691800188501, "support": 2152.0}, "O": {"precision": 0.9998231340643792, "recall": 0.9994695898161244, "f1-score": 0.9996463306808134, "support": 11312.0}, "Premise": {"precision": 0.8951990632318502, "recall": 0.8865236478091609, "f1-score": 0.8908402347163843, "support": 12073.0}, "accuracy": 0.8962127587948157, "macro avg": {"precision": 0.8561462068102073, "recall": 0.8543892415571037, "f1-score": 0.8551456934280991, "support": 29705.0}, "weighted avg": {"precision": 0.8978830564693184, "recall": 0.8962127587948157, "f1-score": 0.8969858736149474, "support": 29705.0}}
meta_data/meta_s42_e11_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6419193124850799, "recall": 0.6309244486156734, "f1-score": 0.6363743935628919, "support": 4262.0}, "MajorClaim": {"precision": 0.9044972208185953, "recall": 0.8267898383371824, "f1-score": 0.8638996138996139, "support": 2165.0}, "O": {"precision": 0.9963806860245126, "recall": 0.9982693258612164, "f1-score": 0.9973241118109588, "support": 12134.0}, "Premise": {"precision": 0.8938606403013183, "recall": 0.910039113428944, "f1-score": 0.9018773276582809, "support": 13039.0}, "accuracy": 0.9005696202531646, "macro avg": {"precision": 0.8591644649073765, "recall": 0.841505681560754, "f1-score": 0.8498688617329364, "support": 31600.0}, "weighted avg": {"precision": 0.8999755925947466, "recall": 0.9005696202531646, "f1-score": 0.900116442326209, "support": 31600.0}}
meta_data/meta_s42_e11_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6739235607160136, "recall": 0.639871382636656, "f1-score": 0.6564561734213007, "support": 4354.0}, "MajorClaim": {"precision": 0.893425888117599, "recall": 0.9314601958280119, "f1-score": 0.9120466861192162, "support": 2349.0}, "O": {"precision": 0.9999202297383536, "recall": 0.9999202297383536, "f1-score": 0.9999202297383536, "support": 12536.0}, "Premise": {"precision": 0.9000296428042093, "recall": 0.908105278899357, "f1-score": 0.9040494268274527, "support": 13374.0}, "accuracy": 0.9092693097844418, "macro avg": {"precision": 0.8668248303440439, "recall": 0.8698392717755946, "f1-score": 0.8681181290265808, "support": 32613.0}, "weighted avg": {"precision": 0.9077642975932683, "recall": 0.9092693097844418, "f1-score": 0.9084219445975756, "support": 32613.0}}
meta_data/meta_s42_e11_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.696518456375839, "recall": 0.7287689269256089, "f1-score": 0.7122788203753351, "support": 4557.0}, "MajorClaim": {"precision": 0.9159472966833258, "recall": 0.8884971353018951, "f1-score": 0.9020134228187919, "support": 2269.0}, "O": {"precision": 0.9999095268252963, "recall": 0.998283804534369, "f1-score": 0.9990960043391791, "support": 11071.0}, "Premise": {"precision": 0.9226872093830245, "recall": 0.9147516168983074, "f1-score": 0.9187022768890578, "support": 14534.0}, "accuracy": 0.9152970922882427, "macro avg": {"precision": 0.8837656223168714, "recall": 0.8825753709150451, "f1-score": 0.8830226311055909, "support": 32431.0}, "weighted avg": {"precision": 0.9167973017956553, "recall": 0.9152970922882427, "f1-score": 0.9159734142385637, "support": 32431.0}}
meta_data/meta_s42_e11_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.7055879899916597, "recall": 0.6850202429149798, "f1-score": 0.695152013147083, "support": 4940.0}, "MajorClaim": {"precision": 0.8569044639132248, "recall": 0.9387568555758684, "f1-score": 0.8959651035986914, "support": 2188.0}, "O": {"precision": 0.9990970654627539, "recall": 0.9943832846551337, "f1-score": 0.9967346019592388, "support": 13353.0}, "Premise": {"precision": 0.9171541800339686, "recall": 0.917038807472168, "f1-score": 0.9170964901245439, "support": 15899.0}, "accuracy": 0.9152281473336998, "macro avg": {"precision": 0.8696859248504017, "recall": 0.8837997976545375, "f1-score": 0.8762370522073893, "support": 36380.0}, "weighted avg": {"precision": 0.9148787537406586, "recall": 0.9152281473336998, "f1-score": 0.9149185494247488, "support": 36380.0}}
meta_data/meta_s42_e11_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6256446319737459, "recall": 0.6403550863723608, "f1-score": 0.6329143941190419, "support": 4168.0}, "MajorClaim": {"precision": 0.9062196307094267, "recall": 0.866635687732342, "f1-score": 0.8859857482185273, "support": 2152.0}, "O": {"precision": 0.9999115904871364, "recall": 0.9998231966053748, "f1-score": 0.9998673915926268, "support": 11312.0}, "Premise": {"precision": 0.8913007456503729, "recall": 0.8910792677876253, "f1-score": 0.8911899929586214, "support": 12073.0}, "accuracy": 0.8955394714694496, "macro avg": {"precision": 0.8557691497051705, "recall": 0.8494733096244258, "f1-score": 0.8524893817222043, "support": 29705.0}, "weighted avg": {"precision": 0.8964667660387375, "recall": 0.8955394714694496, "f1-score": 0.8959591059935927, "support": 29705.0}}
meta_data/meta_s42_e12_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.645798619376339, "recall": 0.6365556076959175, "f1-score": 0.6411438024341252, "support": 4262.0}, "MajorClaim": {"precision": 0.915603532875368, "recall": 0.8618937644341801, "f1-score": 0.887937187723055, "support": 2165.0}, "O": {"precision": 0.9983503794127351, "recall": 0.9975276083731663, "f1-score": 0.9979388243053838, "support": 12134.0}, "Premise": {"precision": 0.893782579134245, "recall": 0.9073548585014188, "f1-score": 0.9005175825848682, "support": 13039.0}, "accuracy": 0.9023417721518987, "macro avg": {"precision": 0.8633837776996717, "recall": 0.8508329597511708, "f1-score": 0.8568843492618581, "support": 31600.0}, "weighted avg": {"precision": 0.9019838581513505, "recall": 0.9023417721518987, "f1-score": 0.9020815617354518, "support": 31600.0}}
meta_data/meta_s42_e12_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6641880546893739, "recall": 0.6359669269637115, "f1-score": 0.6497712073213656, "support": 4354.0}, "MajorClaim": {"precision": 0.9122187913542126, "recall": 0.8803746275010643, "f1-score": 0.8960138648180244, "support": 2349.0}, "O": {"precision": 1.0, "recall": 0.9996809189534142, "f1-score": 0.9998404340194671, "support": 12536.0}, "Premise": {"precision": 0.8958592891168926, "recall": 0.9140122625990729, "f1-score": 0.9048447388874494, "support": 13374.0}, "accuracy": 0.907398890013185, "macro avg": {"precision": 0.8680665337901199, "recall": 0.8575086840043157, "f1-score": 0.8626175612615767, "support": 32613.0}, "weighted avg": {"precision": 0.9061386215208016, "recall": 0.907398890013185, "f1-score": 0.9066701506725403, "support": 32613.0}}
meta_data/meta_s42_e12_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6920757967269595, "recall": 0.7052885670397191, "f1-score": 0.6986197152483425, "support": 4557.0}, "MajorClaim": {"precision": 0.9017341040462428, "recall": 0.8937858087263112, "f1-score": 0.897742363877822, "support": 2269.0}, "O": {"precision": 0.9999095513748191, "recall": 0.9985547827657845, "f1-score": 0.9992317078682154, "support": 11071.0}, "Premise": {"precision": 0.9181742853197072, "recall": 0.9148892252648961, "f1-score": 0.916528811690102, "support": 14534.0}, "accuracy": 0.9125219697203293, "macro avg": {"precision": 0.8779734343669321, "recall": 0.8781295959491777, "f1-score": 0.8780306496711205, "support": 32431.0}, "weighted avg": {"precision": 0.9131561713753177, "recall": 0.9125219697203293, "f1-score": 0.9128275863506654, "support": 32431.0}}
meta_data/meta_s42_e12_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.7021923160408075, "recall": 0.6548582995951417, "f1-score": 0.6776998009846025, "support": 4940.0}, "MajorClaim": {"precision": 0.8957325120985482, "recall": 0.9305301645338209, "f1-score": 0.9127998206680117, "support": 2188.0}, "O": {"precision": 0.9992491928823485, "recall": 0.9967048603310118, "f1-score": 0.9979754049190162, "support": 13353.0}, "Premise": {"precision": 0.9045794450281194, "recall": 0.9206239386124914, "f1-score": 0.9125311720698255, "support": 15899.0}, "accuracy": 0.9130566245189665, "macro avg": {"precision": 0.8754383665124559, "recall": 0.8756793157681164, "f1-score": 0.8752515496603639, "support": 36380.0}, "weighted avg": {"precision": 0.9113132448260934, "recall": 0.9130566245189665, "f1-score": 0.9120214873861275, "support": 36380.0}}
meta_data/meta_s42_e12_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6307230422817113, "recall": 0.6048464491362764, "f1-score": 0.6175137783221066, "support": 4168.0}, "MajorClaim": {"precision": 0.8988711194731891, "recall": 0.8880111524163569, "f1-score": 0.8934081346423562, "support": 2152.0}, "O": {"precision": 0.9999116061168567, "recall": 1.0, "f1-score": 0.9999558011049724, "support": 11312.0}, "Premise": {"precision": 0.8821419838617655, "recall": 0.8964631823076286, "f1-score": 0.8892449264645469, "support": 12073.0}, "accuracy": 0.8943612186500589, "macro avg": {"precision": 0.8529119379333807, "recall": 0.8473301959650654, "f1-score": 0.8500306601334956, "support": 29705.0}, "weighted avg": {"precision": 0.892924576633343, "recall": 0.8943612186500589, "f1-score": 0.8935790524525439, "support": 29705.0}}
meta_data/meta_s42_e13_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6565823682344599, "recall": 0.6518066635382449, "f1-score": 0.6541858000706463, "support": 4262.0}, "MajorClaim": {"precision": 0.9326347305389222, "recall": 0.8632794457274827, "f1-score": 0.8966178939793716, "support": 2165.0}, "O": {"precision": 0.9979423868312757, "recall": 0.9992582825119499, "f1-score": 0.9985999011694944, "support": 12134.0}, "Premise": {"precision": 0.8967082860385925, "recall": 0.9088120254620753, "f1-score": 0.9027195855869582, "support": 13039.0}, "accuracy": 0.905759493670886, "macro avg": {"precision": 0.8709669429108126, "recall": 0.8557891043099383, "f1-score": 0.8630307952016177, "support": 31600.0}, "weighted avg": {"precision": 0.9056557122943019, "recall": 0.905759493670886, "f1-score": 0.905597768912197, "support": 31600.0}}
meta_data/meta_s42_e13_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.671142578125, "recall": 0.631373449701424, "f1-score": 0.6506508875739644, "support": 4354.0}, "MajorClaim": {"precision": 0.9009774755631109, "recall": 0.9025117071094083, "f1-score": 0.9017439387494683, "support": 2349.0}, "O": {"precision": 0.9995214166068438, "recall": 0.9996011486917677, "f1-score": 0.9995612810593069, "support": 12536.0}, "Premise": {"precision": 0.8978498569017392, "recall": 0.9148347540002991, "f1-score": 0.9062627310099627, "support": 13374.0}, "accuracy": 0.908686720019624, "macro avg": {"precision": 0.8673728317991735, "recall": 0.8620802648757248, "f1-score": 0.8645547095981756, "support": 32613.0}, "weighted avg": {"precision": 0.906889747647909, "recall": 0.908686720019624, "f1-score": 0.9076744997548908, "support": 32613.0}}
meta_data/meta_s42_e13_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6950662899369702, "recall": 0.7017774851876234, "f1-score": 0.6984057654509719, "support": 4557.0}, "MajorClaim": {"precision": 0.8949227373068432, "recall": 0.8933450859409432, "f1-score": 0.8941332157035731, "support": 2269.0}, "O": {"precision": 0.9999094612947035, "recall": 0.9975611959172613, "f1-score": 0.9987339482727438, "support": 11071.0}, "Premise": {"precision": 0.9170798898071625, "recall": 0.9161965047474886, "f1-score": 0.9166379844427617, "support": 14534.0}, "accuracy": 0.912244457463538, "macro avg": {"precision": 0.8767445945864198, "recall": 0.8772200679483291, "f1-score": 0.8769777284675127, "support": 32431.0}, "weighted avg": {"precision": 0.9126093410207197, "recall": 0.912244457463538, "f1-score": 0.9124240185874667, "support": 32431.0}}
meta_data/meta_s42_e13_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.7050160085378868, "recall": 0.6686234817813765, "f1-score": 0.6863376623376624, "support": 4940.0}, "MajorClaim": {"precision": 0.8845654993514915, "recall": 0.9351005484460695, "f1-score": 0.9091313041546323, "support": 2188.0}, "O": {"precision": 0.9998496805712138, "recall": 0.9962555231034225, "f1-score": 0.9980493660439642, "support": 13353.0}, "Premise": {"precision": 0.9093114387012502, "recall": 0.9194917919365998, "f1-score": 0.9143732799599701, "support": 15899.0}, "accuracy": 0.9145409565695437, "macro avg": {"precision": 0.8746856567904606, "recall": 0.879867836316867, "f1-score": 0.8769729031240572, "support": 36380.0}, "weighted avg": {"precision": 0.9133134618839149, "recall": 0.9145409565695437, "f1-score": 0.9138059732904619, "support": 36380.0}}
meta_data/meta_s42_e13_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6449313621964097, "recall": 0.5861324376199616, "f1-score": 0.6141277023629965, "support": 4168.0}, "MajorClaim": {"precision": 0.920619554695063, "recall": 0.8838289962825279, "f1-score": 0.9018492176386913, "support": 2152.0}, "O": {"precision": 1.0, "recall": 0.9996463932107497, "f1-score": 0.9998231653404068, "support": 11312.0}, "Premise": {"precision": 0.8746711313082994, "recall": 0.9087219415224054, "f1-score": 0.8913714657133571, "support": 12073.0}, "accuracy": 0.8962800875273523, "macro avg": {"precision": 0.860055512049943, "recall": 0.8445824421589111, "f1-score": 0.851792887763863, "support": 29705.0}, "weighted avg": {"precision": 0.8934910542879485, "recall": 0.8962800875273523, "f1-score": 0.8945292419355487, "support": 29705.0}}
meta_data/meta_s42_e14_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6261579179532422, "recall": 0.6661191928671985, "f1-score": 0.6455206912232833, "support": 4262.0}, "MajorClaim": {"precision": 0.9465325569084172, "recall": 0.8258660508083141, "f1-score": 0.882091761223483, "support": 2165.0}, "O": {"precision": 0.998680194671286, "recall": 0.9977748475358497, "f1-score": 0.9982273158263594, "support": 12134.0}, "Premise": {"precision": 0.896583422705684, "recall": 0.8976148477643991, "f1-score": 0.8970988387690184, "support": 13039.0}, "accuracy": 0.8999367088607595, "macro avg": {"precision": 0.8669885230596573, "recall": 0.8468437347439404, "f1-score": 0.855734651760536, "support": 31600.0}, "weighted avg": {"precision": 0.9027362266716531, "recall": 0.8999367088607595, "f1-score": 0.9009715144933148, "support": 31600.0}}
meta_data/meta_s42_e14_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6878803245436106, "recall": 0.6231051906293064, "f1-score": 0.6538925042178839, "support": 4354.0}, "MajorClaim": {"precision": 0.8802296964725185, "recall": 0.9135802469135802, "f1-score": 0.8965949446417381, "support": 2349.0}, "O": {"precision": 1.0, "recall": 0.9998404594767071, "f1-score": 0.9999202233745512, "support": 12536.0}, "Premise": {"precision": 0.8991019931371833, "recall": 0.9208165096455809, "f1-score": 0.9098297070666026, "support": 13374.0}, "accuracy": 0.9109250912212922, "macro avg": {"precision": 0.8668030035383281, "recall": 0.8643356016662938, "f1-score": 0.865059344825194, "support": 32613.0}, "weighted avg": {"precision": 0.9083273708733792, "recall": 0.9109250912212922, "f1-score": 0.9093371327648554, "support": 32613.0}}
meta_data/meta_s42_e14_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.7133158125273763, "recall": 0.7147245995172262, "f1-score": 0.7140195111257261, "support": 4557.0}, "MajorClaim": {"precision": 0.886353852776582, "recall": 0.9074482150727192, "f1-score": 0.8967770034843205, "support": 2269.0}, "O": {"precision": 0.9999094612947035, "recall": 0.9975611959172613, "f1-score": 0.9987339482727438, "support": 11071.0}, "Premise": {"precision": 0.9228116161964545, "recall": 0.920462364111738, "f1-score": 0.9216354930935897, "support": 14534.0}, "accuracy": 0.9169621658289908, "macro avg": {"precision": 0.880597685698779, "recall": 0.8850490936547362, "f1-score": 0.882791488994095, "support": 32431.0}, "weighted avg": {"precision": 0.9171428178418873, "recall": 0.9169621658289908, "f1-score": 0.9170425744212524, "support": 32431.0}}
meta_data/meta_s42_e14_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6844962799115222, "recall": 0.6890688259109312, "f1-score": 0.6867749419953597, "support": 4940.0}, "MajorClaim": {"precision": 0.8884101680310211, "recall": 0.9424131627056673, "f1-score": 0.9146152140164117, "support": 2188.0}, "O": {"precision": 0.999024902490249, "recall": 0.9974537557103272, "f1-score": 0.9982387108862657, "support": 13353.0}, "Premise": {"precision": 0.9153230925479243, "recall": 0.906975281464243, "f1-score": 0.9111300666603481, "support": 15899.0}, "accuracy": 0.9127267729521715, "macro avg": {"precision": 0.8718136107451792, "recall": 0.8839777564477922, "f1-score": 0.8776897333895963, "support": 36380.0}, "weighted avg": {"precision": 0.9130828598621918, "recall": 0.9127267729521715, "f1-score": 0.9128472990110548, "support": 36380.0}}
meta_data/meta_s42_e14_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6411675893306492, "recall": 0.6113243761996161, "f1-score": 0.6258904446082044, "support": 4168.0}, "MajorClaim": {"precision": 0.918905715681485, "recall": 0.8740706319702602, "f1-score": 0.8959276018099548, "support": 2152.0}, "O": {"precision": 0.9999116061168567, "recall": 1.0, "f1-score": 0.9999558011049724, "support": 11312.0}, "Premise": {"precision": 0.8821437232236683, "recall": 0.9039178331814793, "f1-score": 0.8928980526918672, "support": 12073.0}, "accuracy": 0.8972900185154015, "macro avg": {"precision": 0.8605321585881648, "recall": 0.8473282103378389, "f1-score": 0.8536679750537497, "support": 29705.0}, "weighted avg": {"precision": 0.8958422107843774, "recall": 0.8972900185154015, "f1-score": 0.8964216725962088, "support": 29705.0}}
meta_data/meta_s42_e15_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.63760092272203, "recall": 0.6485218207414359, "f1-score": 0.643015005234384, "support": 4262.0}, "MajorClaim": {"precision": 0.9371428571428572, "recall": 0.833256351039261, "f1-score": 0.8821515892420538, "support": 2165.0}, "O": {"precision": 0.9985148514851485, "recall": 0.9973627822647108, "f1-score": 0.9979384843737115, "support": 12134.0}, "Premise": {"precision": 0.8953101361573373, "recall": 0.9077383234910653, "f1-score": 0.9014813968544119, "support": 13039.0}, "accuracy": 0.9020886075949367, "macro avg": {"precision": 0.8671421918768432, "recall": 0.8467198193841182, "f1-score": 0.8561466189261403, "support": 31600.0}, "weighted avg": {"precision": 0.9030473889756928, "recall": 0.9020886075949367, "f1-score": 0.9023351153795343, "support": 31600.0}}
meta_data/meta_s42_e15_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6871180640430212, "recall": 0.6456132292145154, "f1-score": 0.6657193605683837, "support": 4354.0}, "MajorClaim": {"precision": 0.9006076388888888, "recall": 0.8833546189868029, "f1-score": 0.8918977004083387, "support": 2349.0}, "O": {"precision": 0.9999202233745512, "recall": 0.9998404594767071, "f1-score": 0.9998803398348689, "support": 12536.0}, "Premise": {"precision": 0.9018490097200906, "recall": 0.9226858082847316, "f1-score": 0.91214842739402, "support": 13374.0}, "accuracy": 0.9125195474197406, "macro avg": {"precision": 0.872373734006638, "recall": 0.8628735289906893, "f1-score": 0.8674114570514028, "support": 32613.0}, "weighted avg": {"precision": 0.9107891935980492, "recall": 0.9125195474197406, "f1-score": 0.9115132861837755, "support": 32613.0}}
meta_data/meta_s42_e15_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6956247370635255, "recall": 0.7256967303050252, "f1-score": 0.7103426055203522, "support": 4557.0}, "MajorClaim": {"precision": 0.8857018687527162, "recall": 0.8981930365799912, "f1-score": 0.8919037199124725, "support": 2269.0}, "O": {"precision": 0.9999094612947035, "recall": 0.9975611959172613, "f1-score": 0.9987339482727438, "support": 11071.0}, "Premise": {"precision": 0.9254762403181913, "recall": 0.9125498830328884, "f1-score": 0.9189676078295513, "support": 14534.0}, "accuracy": 0.9143103820418735, "macro avg": {"precision": 0.876678076857284, "recall": 0.8835002114587915, "f1-score": 0.8799869703837799, "support": 32431.0}, "weighted avg": {"precision": 0.9158055190952068, "recall": 0.9143103820418735, "f1-score": 0.9149893480731366, "support": 32431.0}}
meta_data/meta_s42_e15_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.693723849372385, "recall": 0.671255060728745, "f1-score": 0.6823045267489712, "support": 4940.0}, "MajorClaim": {"precision": 0.893368010403121, "recall": 0.9419561243144424, "f1-score": 0.9170189098998889, "support": 2188.0}, "O": {"precision": 0.9995495833646123, "recall": 0.997154197558601, "f1-score": 0.998350453625253, "support": 13353.0}, "Premise": {"precision": 0.9108439769596794, "recall": 0.915026102270583, "f1-score": 0.912930250070597, "support": 15899.0}, "accuracy": 0.9136888400219901, "macro avg": {"precision": 0.8743713550249494, "recall": 0.8813478712180929, "f1-score": 0.8776510350861776, "support": 36380.0}, "weighted avg": {"precision": 0.912869131363695, "recall": 0.9136888400219901, "f1-score": 0.9132125725709538, "support": 36380.0}}
meta_data/meta_s42_e15_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6588266384778013, "recall": 0.5981285988483686, "f1-score": 0.6270120724346077, "support": 4168.0}, "MajorClaim": {"precision": 0.9058606368251039, "recall": 0.9121747211895911, "f1-score": 0.9090067145172495, "support": 2152.0}, "O": {"precision": 1.0, "recall": 0.999557991513437, "f1-score": 0.9997789469030461, "support": 11312.0}, "Premise": {"precision": 0.8805334618783642, "recall": 0.9078108175267124, "f1-score": 0.8939641109298531, "support": 12073.0}, "accuracy": 0.8996128597879145, "macro avg": {"precision": 0.8613051842953173, "recall": 0.8544180322695273, "f1-score": 0.857440461196189, "support": 29705.0}, "weighted avg": {"precision": 0.8967541492974446, "recall": 0.8996128597879145, "f1-score": 0.8978925071931304, "support": 29705.0}}
meta_data/meta_s42_e16_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.5950994175537256, "recall": 0.6952135147817926, "f1-score": 0.6412725895465858, "support": 4262.0}, "MajorClaim": {"precision": 0.781058282208589, "recall": 0.9408775981524249, "f1-score": 0.8535512256442489, "support": 2165.0}, "O": {"precision": 0.9986824769433466, "recall": 0.9995055216746332, "f1-score": 0.9990938298047615, "support": 12134.0}, "Premise": {"precision": 0.9320077512848597, "recall": 0.8483779430937956, "f1-score": 0.8882286815480969, "support": 13039.0}, "accuracy": 0.8920886075949367, "macro avg": {"precision": 0.8267119819976302, "recall": 0.8709936444256616, "f1-score": 0.8455365816359233, "support": 31600.0}, "weighted avg": {"precision": 0.9018280741401717, "recall": 0.8920886075949367, "f1-score": 0.8951158382824038, "support": 31600.0}}
meta_data/meta_s42_e4_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6454043194374686, "recall": 0.6030032848427969, "f1-score": 0.6234837457544881, "support": 4262.0}, "MajorClaim": {"precision": 0.8682170542635659, "recall": 0.7759815242494227, "f1-score": 0.8195121951219513, "support": 2165.0}, "O": {"precision": 0.9972021066491112, "recall": 0.9986813911323553, "f1-score": 0.9979412006917566, "support": 12134.0}, "Premise": {"precision": 0.8898085876875323, "recall": 0.9233836950686403, "f1-score": 0.9062852841550622, "support": 13039.0}, "accuracy": 0.8989873417721519, "macro avg": {"precision": 0.8501580170094195, "recall": 0.8252624738233038, "f1-score": 0.8368056064308145, "support": 31600.0}, "weighted avg": {"precision": 0.8966034072424418, "recall": 0.8989873417721519, "f1-score": 0.8973925308903887, "support": 31600.0}}
meta_data/meta_s42_e4_cvi1.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6517270910648016, "recall": 0.524345429490124, "f1-score": 0.5811378388697976, "support": 4354.0}, "MajorClaim": {"precision": 0.8473127035830619, "recall": 0.8859088974031503, "f1-score": 0.8661810613943809, "support": 2349.0}, "O": {"precision": 0.9998404467491026, "recall": 0.9997606892150607, "f1-score": 0.9998005663914483, "support": 12536.0}, "Premise": {"precision": 0.8757702386854593, "recall": 0.9245551069238822, "f1-score": 0.8995016913396138, "support": 13374.0}, "accuracy": 0.8972495630576763, "macro avg": {"precision": 0.8436626200206063, "recall": 0.8336425307580544, "f1-score": 0.8366552894988102, "support": 32613.0}, "weighted avg": {"precision": 0.8915005766976004, "recall": 0.8972495630576763, "f1-score": 0.8931520860979881, "support": 32613.0}}
meta_data/meta_s42_e4_cvi2.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6552830621132248, "recall": 0.6273864384463462, "f1-score": 0.6410313901345291, "support": 4557.0}, "MajorClaim": {"precision": 0.8519823788546256, "recall": 0.8523578669017188, "f1-score": 0.8521700815157525, "support": 2269.0}, "O": {"precision": 0.9993671458276828, "recall": 0.998464456688646, "f1-score": 0.99891559732514, "support": 11071.0}, "Premise": {"precision": 0.9035081766981068, "recall": 0.9161277005641943, "f1-score": 0.9097741792217554, "support": 14534.0}, "accuracy": 0.8992013813943449, "macro avg": {"precision": 0.85253519087341, "recall": 0.8485841156502263, "f1-score": 0.8504728120492941, "support": 32431.0}, "weighted avg": {"precision": 0.8977476625222679, "recall": 0.8992013813943449, "f1-score": 0.8984122123461475, "support": 32431.0}}
meta_data/meta_s42_e4_cvi3.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6819186560565871, "recall": 0.6244939271255061, "f1-score": 0.6519442096365173, "support": 4940.0}, "MajorClaim": {"precision": 0.8671841310909875, "recall": 0.9191042047531993, "f1-score": 0.8923896161526514, "support": 2188.0}, "O": {"precision": 0.9975258659469186, "recall": 0.9964053021792856, "f1-score": 0.9969652691918625, "support": 13353.0}, "Premise": {"precision": 0.9019692573615655, "recall": 0.9189886156362035, "f1-score": 0.9103994018318897, "support": 15899.0}, "accuracy": 0.9074216602528862, "macro avg": {"precision": 0.8621494776140147, "recall": 0.8647480124235486, "f1-score": 0.8629246242032302, "support": 36380.0}, "weighted avg": {"precision": 0.9050700701079537, "recall": 0.9074216602528862, "f1-score": 0.9059942332322857, "support": 36380.0}}
meta_data/meta_s42_e4_cvi4.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.6146161934805467, "recall": 0.5609404990403071, "f1-score": 0.5865529352734571, "support": 4168.0}, "MajorClaim": {"precision": 0.8342541436464088, "recall": 0.8420074349442379, "f1-score": 0.8381128584643849, "support": 2152.0}, "O": {"precision": 0.9999115122555526, "recall": 0.998939179632249, "f1-score": 0.9994251094503163, "support": 11312.0}, "Premise": {"precision": 0.8800289668490505, "recall": 0.9059057400811729, "f1-score": 0.8927798865352434, "support": 12073.0}, "accuracy": 0.888301632721764, "macro avg": {"precision": 0.8322027040578897, "recall": 0.8269482134244918, "f1-score": 0.8292176974308504, "support": 29705.0}, "weighted avg": {"precision": 0.8851245229744956, "recall": 0.888301632721764, "f1-score": 0.8864635554242416, "support": 29705.0}}
meta_data/meta_s42_e5_cvi0.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"Claim": {"precision": 0.64, "recall": 0.6006569685593618, "f1-score": 0.6197046719922537, "support": 4262.0}, "MajorClaim": {"precision": 0.8654327163581791, "recall": 0.7990762124711316, "f1-score": 0.8309317963496637, "support": 2165.0}, "O": {"precision": 0.9973673385438091, "recall": 0.9990934564034943, "f1-score": 0.9982296512824735, "support": 12134.0}, "Premise": {"precision": 0.8908969210174029, "recall": 0.9187054221949535, "f1-score": 0.9045875023598263, "support": 13039.0}, "accuracy": 0.8984810126582279, "macro avg": {"precision": 0.8484242439798478, "recall": 0.8293830149072354, "f1-score": 0.8383634054960544, "support": 31600.0}, "weighted avg": {"precision": 0.8961962680364542, "recall": 0.8984810126582279, "f1-score": 0.8970754330082062, "support": 31600.0}}