Fixing some errors in the leaderboard evaluation results in the ModelCard YAML

#6
Files changed (1) hide show
  1. README.md +27 -1
README.md CHANGED
@@ -65,6 +65,19 @@ model-index:
65
  - type: f1_macro
66
  value: 46.68
67
  name: f1-macro
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  - type: pearson
69
  value: 1.89
70
  name: pearson
@@ -92,7 +105,7 @@ model-index:
92
  name: Text Generation
93
  dataset:
94
  name: HateBR Binary
95
- type: eduagarcia/portuguese_benchmark
96
  split: test
97
  args:
98
  num_few_shot: 25
@@ -100,6 +113,19 @@ model-index:
100
  - type: f1_macro
101
  value: 61.93
102
  name: f1-macro
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  - type: f1_macro
104
  value: 64.13
105
  name: f1-macro
 
65
  - type: f1_macro
66
  value: 46.68
67
  name: f1-macro
68
+ source:
69
+ url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=maritaca-ai/sabia-7b
70
+ name: Open Portuguese LLM Leaderboard
71
+ - task:
72
+ type: text-generation
73
+ name: Text Generation
74
+ dataset:
75
+ name: Assin2 STS
76
+ type: eduagarcia/portuguese_benchmark
77
+ split: test
78
+ args:
79
+ num_few_shot: 15
80
+ metrics:
81
  - type: pearson
82
  value: 1.89
83
  name: pearson
 
105
  name: Text Generation
106
  dataset:
107
  name: HateBR Binary
108
+ type: ruanchaves/hatebr
109
  split: test
110
  args:
111
  num_few_shot: 25
 
113
  - type: f1_macro
114
  value: 61.93
115
  name: f1-macro
116
+ source:
117
+ url: https://huggingface.co/spaces/eduagarcia/open_pt_llm_leaderboard?query=maritaca-ai/sabia-7b
118
+ name: Open Portuguese LLM Leaderboard
119
+ - task:
120
+ type: text-generation
121
+ name: Text Generation
122
+ dataset:
123
+ name: PT Hate Speech Binary
124
+ type: hate_speech_portuguese
125
+ split: test
126
+ args:
127
+ num_few_shot: 25
128
+ metrics:
129
  - type: f1_macro
130
  value: 64.13
131
  name: f1-macro