Suparious committed on
Commit 187a2e4
1 Parent(s): 17d1812

Updated and moved existing to merged_models base_model tag in README.md

Files changed (1)
  1. README.md +75 -74
README.md CHANGED
@@ -1,131 +1,121 @@
  ---
  license: cc-by-nc-4.0
- tags:
- - merge
- - mergekit
- - lazymergekit
- - quantized
- - 4-bit
- - AWQ
- - text-generation
- - autotrain_compatible
- - endpoints_compatible
- - chatml
- base_model:
  - mlabonne/OmniTruthyBeagle-7B-v0
  - mlabonne/NeuBeagle-7B
  - mlabonne/NeuralOmniBeagle-7B
  model-index:
  - name: Monarch-7B
    results:
-   - task:
-       type: text-generation
-       name: Text Generation
-     dataset:
-       name: AI2 Reasoning Challenge (25-Shot)
-       type: ai2_arc
-       config: ARC-Challenge
-       split: test
        args:
          num_few_shot: 25
      metrics:
-     - type: acc_norm
        value: 73.04
-       name: normalized accuracy
      source:
-       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
        name: Open LLM Leaderboard
-   - task:
-       type: text-generation
        name: Text Generation
-     dataset:
-       name: HellaSwag (10-Shot)
-       type: hellaswag
-       split: validation
        args:
          num_few_shot: 10
      metrics:
-     - type: acc_norm
        value: 89.03
-       name: normalized accuracy
      source:
-       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
        name: Open LLM Leaderboard
-   - task:
-       type: text-generation
        name: Text Generation
-     dataset:
-       name: MMLU (5-Shot)
-       type: cais/mmlu
-       config: all
-       split: test
        args:
          num_few_shot: 5
      metrics:
-     - type: acc
        value: 64.41
-       name: accuracy
      source:
-       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
        name: Open LLM Leaderboard
-   - task:
-       type: text-generation
        name: Text Generation
-     dataset:
-       name: TruthfulQA (0-shot)
-       type: truthful_qa
-       config: multiple_choice
-       split: validation
        args:
          num_few_shot: 0
      metrics:
      - type: mc2
        value: 77.35
      source:
-       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
        name: Open LLM Leaderboard
-   - task:
-       type: text-generation
        name: Text Generation
-     dataset:
-       name: Winogrande (5-shot)
-       type: winogrande
-       config: winogrande_xl
-       split: validation
        args:
          num_few_shot: 5
      metrics:
-     - type: acc
        value: 84.61
-       name: accuracy
      source:
-       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
        name: Open LLM Leaderboard
-   - task:
-       type: text-generation
        name: Text Generation
-     dataset:
-       name: GSM8k (5-shot)
-       type: gsm8k
-       config: main
-       split: test
        args:
          num_few_shot: 5
      metrics:
-     - type: acc
        value: 69.07
-       name: accuracy
      source:
-       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
        name: Open LLM Leaderboard
- library_name: transformers
- language:
- - en
  model_creator: mlabonne
  model_name: Darewin-7B
  model_type: mistral
  pipeline_tag: text-generation
- inference: false
  prompt_template: '<|im_start|>system

    {system_message}<|im_end|>
@@ -138,6 +128,17 @@ prompt_template: '<|im_start|>system

    '
  quantized_by: Suparious
  ---
  # mlabonne/Monarch-7B AWQ
 
  ---
+ base_model: mlabonne/Monarch-7B
+ inference: false
+ language:
+ - en
+ library_name: transformers
  license: cc-by-nc-4.0
+ merged_models:
  - mlabonne/OmniTruthyBeagle-7B-v0
  - mlabonne/NeuBeagle-7B
  - mlabonne/NeuralOmniBeagle-7B
  model-index:
  - name: Monarch-7B
    results:
+   - dataset:
        args:
          num_few_shot: 25
+       config: ARC-Challenge
+       name: AI2 Reasoning Challenge (25-Shot)
+       split: test
+       type: ai2_arc
      metrics:
+     - name: normalized accuracy
+       type: acc_norm
        value: 73.04
      source:
        name: Open LLM Leaderboard
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
+     task:
        name: Text Generation
+       type: text-generation
+   - dataset:
        args:
          num_few_shot: 10
+       name: HellaSwag (10-Shot)
+       split: validation
+       type: hellaswag
      metrics:
+     - name: normalized accuracy
+       type: acc_norm
        value: 89.03
      source:
        name: Open LLM Leaderboard
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
+     task:
        name: Text Generation
+       type: text-generation
+   - dataset:
        args:
          num_few_shot: 5
+       config: all
+       name: MMLU (5-Shot)
+       split: test
+       type: cais/mmlu
      metrics:
+     - name: accuracy
+       type: acc
        value: 64.41
      source:
        name: Open LLM Leaderboard
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
+     task:
        name: Text Generation
+       type: text-generation
+   - dataset:
        args:
          num_few_shot: 0
+       config: multiple_choice
+       name: TruthfulQA (0-shot)
+       split: validation
+       type: truthful_qa
      metrics:
      - type: mc2
        value: 77.35
      source:
        name: Open LLM Leaderboard
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
+     task:
        name: Text Generation
+       type: text-generation
+   - dataset:
        args:
          num_few_shot: 5
+       config: winogrande_xl
+       name: Winogrande (5-shot)
+       split: validation
+       type: winogrande
      metrics:
+     - name: accuracy
+       type: acc
        value: 84.61
      source:
        name: Open LLM Leaderboard
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
+     task:
        name: Text Generation
+       type: text-generation
+   - dataset:
        args:
          num_few_shot: 5
+       config: main
+       name: GSM8k (5-shot)
+       split: test
+       type: gsm8k
      metrics:
+     - name: accuracy
+       type: acc
        value: 69.07
      source:
        name: Open LLM Leaderboard
+       url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=mlabonne/Monarch-7B
+     task:
+       name: Text Generation
+       type: text-generation
  model_creator: mlabonne
  model_name: Darewin-7B
  model_type: mistral
  pipeline_tag: text-generation
  prompt_template: '<|im_start|>system

    {system_message}<|im_end|>

    '
  quantized_by: Suparious
+ tags:
+ - merge
+ - mergekit
+ - lazymergekit
+ - quantized
+ - 4-bit
+ - AWQ
+ - text-generation
+ - autotrain_compatible
+ - endpoints_compatible
+ - chatml
  ---
  # mlabonne/Monarch-7B AWQ
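For context, the card's `prompt_template` is the standard ChatML layout and the tags mark this as a 4-bit AWQ build. A minimal usage sketch (not part of this commit) of how the quantized model might be loaded and prompted with that template via the AutoAWQ library; the repo path is a placeholder and the system/user strings are illustrative:

```python
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

# Placeholder: substitute the actual AWQ repo id or a local checkpoint directory.
quant_path = "path/to/Monarch-7B-AWQ"

# Load the 4-bit AWQ weights and the matching tokenizer (GPU assumed).
model = AutoAWQForCausalLM.from_quantized(quant_path, fuse_layers=True)
tokenizer = AutoTokenizer.from_pretrained(quant_path)

# Fill in the ChatML prompt_template declared in the card's front matter.
prompt = (
    "<|im_start|>system\n{system_message}<|im_end|>\n"
    "<|im_start|>user\n{prompt}<|im_end|>\n"
    "<|im_start|>assistant\n"
).format(
    system_message="You are a helpful assistant.",          # illustrative
    prompt="Which models were merged into Monarch-7B?",     # illustrative
)

tokens = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
output = model.generate(tokens, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```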