Update README.md
README.md
CHANGED
@@ -6,9 +6,24 @@ tags:
 metrics:
 - rouge
 - bleu
+- meteor
+datasets:
+- natural_questions
 model-index:
-- name: mt5-
-  results:
+- name: mt5-small
+  results:
+  - task:
+      type: Question answering from context # Required. Example: automatic-speech-recognition
+      name: Question answering # Optional. Example: Speech Recognition
+    dataset:
+      type: natural-questions # Required. Example: common_voice. Use dataset id from https://hf.co/datasets
+      name: Adapted Natural Questions # Required. A pretty name for the dataset. Example: Common Voice (French)
+    metrics:
+    - type: bleu # Required. Example: wer. Use metric id from https://hf.co/metrics
+      value: 34.1596 # Required. Example: 20.90
+      name: BLEU # Optional. Example: Test WER
+      verified: true # Optional. If true, indicates that evaluation was generated by Hugging Face (vs. self-reported).
+
 ---

 # mt5-small_test_45
@@ -85,4 +100,4 @@ The following hyperparameters were used during training:
 - Transformers 4.31.0
 - Pytorch 2.0.1+cu118
 - Datasets 2.13.1
-- Tokenizers 0.13.3
+- Tokenizers 0.13.3
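For reference, the `model-index` block added in this commit follows the standard Hugging Face model card metadata format. The sketch below shows a hypothetical way to generate equivalent front matter programmatically with `huggingface_hub` instead of hand-editing the YAML; it only reproduces the fields visible in this diff and is not the script used to author or evaluate the card.

```python
# Hypothetical sketch: building the same card metadata with huggingface_hub
# instead of hand-editing the YAML front matter. Field values are copied from
# the diff above; nothing here comes from the actual training/eval scripts.
from huggingface_hub import EvalResult, ModelCardData

card_data = ModelCardData(
    metrics=["rouge", "bleu", "meteor"],
    datasets=["natural_questions"],
    model_name="mt5-small",  # becomes model-index[0].name
    eval_results=[
        EvalResult(
            task_type="Question answering from context",
            task_name="Question answering",
            dataset_type="natural-questions",
            dataset_name="Adapted Natural Questions",
            metric_type="bleu",
            metric_value=34.1596,
            metric_name="BLEU",
            verified=True,
        )
    ],
)

# Prints YAML equivalent to the metrics/datasets/model-index keys added above
# (tags and other unchanged front-matter keys are omitted here).
print(card_data.to_yaml())
```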