Suparious committed on
Commit
29064b1
1 Parent(s): bd90544

Updated README.md: moved the existing base_model entries to the merged_models tag

Files changed (1)
  1. README.md +126 -131
README.md CHANGED
@@ -1,142 +1,120 @@
  ---
- tags:
- - finetuned
- - quantized
- - 4-bit
- - AWQ
- - transformers
- - pytorch
- - mistral
- - instruct
- - text-generation
- - conversational
- - license:apache-2.0
- - autotrain_compatible
- - endpoints_compatible
- - text-generation-inference
- - finetune
- - chatml
- model-index:
- - name: OpenHercules-2.5-Mistral-7B
- results:
- - task:
- type: text-generation
- name: Text Generation
- dataset:
- name: AI2 Reasoning Challenge (25-Shot)
- type: ai2_arc
- config: ARC-Challenge
- split: test
- args:
- num_few_shot: 25
- metrics:
- - type: acc_norm
- value: 64.25
- name: normalized accuracy
- source:
- url: >-
- https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
- name: Open LLM Leaderboard
- - task:
- type: text-generation
- name: Text Generation
- dataset:
- name: HellaSwag (10-Shot)
- type: hellaswag
- split: validation
- args:
- num_few_shot: 10
- metrics:
- - type: acc_norm
- value: 84.84
- name: normalized accuracy
- source:
- url: >-
- https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
- name: Open LLM Leaderboard
- - task:
- type: text-generation
- name: Text Generation
- dataset:
- name: MMLU (5-Shot)
- type: cais/mmlu
- config: all
- split: test
- args:
- num_few_shot: 5
- metrics:
- - type: acc
- value: 64.21
- name: accuracy
- source:
- url: >-
- https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
- name: Open LLM Leaderboard
- - task:
- type: text-generation
- name: Text Generation
- dataset:
- name: TruthfulQA (0-shot)
- type: truthful_qa
- config: multiple_choice
- split: validation
- args:
- num_few_shot: 0
- metrics:
- - type: mc2
- value: 47.84
- source:
- url: >-
- https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
- name: Open LLM Leaderboard
- - task:
- type: text-generation
- name: Text Generation
- dataset:
- name: Winogrande (5-shot)
- type: winogrande
- config: winogrande_xl
- split: validation
- args:
- num_few_shot: 5
- metrics:
- - type: acc
- value: 78.93
- name: accuracy
- source:
- url: >-
- https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
- name: Open LLM Leaderboard
- - task:
- type: text-generation
- name: Text Generation
- dataset:
- name: GSM8k (5-shot)
- type: gsm8k
- config: main
- split: test
- args:
- num_few_shot: 5
- metrics:
- - type: acc
- value: 59.21
- name: accuracy
- source:
- url: >-
- https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
- name: Open LLM Leaderboard
- base_model:
- - Locutusque/Hercules-2.5-Mistral-7B
- - teknium/OpenHermes-2.5-Mistral-7B
- license: apache-2.0
+ base_model: hydra-project/OpenHercules-2.5-Mistral-7B
+ inference: false
  language:
  - en
  library_name: transformers
+ license: apache-2.0
+ merged_models:
+ - Locutusque/Hercules-2.5-Mistral-7B
+ - teknium/OpenHermes-2.5-Mistral-7B
+ model-index:
+ - name: OpenHercules-2.5-Mistral-7B
+ results:
+ - dataset:
+ args:
+ num_few_shot: 25
+ config: ARC-Challenge
+ name: AI2 Reasoning Challenge (25-Shot)
+ split: test
+ type: ai2_arc
+ metrics:
+ - name: normalized accuracy
+ type: acc_norm
+ value: 64.25
+ source:
+ name: Open LLM Leaderboard
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
+ task:
+ name: Text Generation
+ type: text-generation
+ - dataset:
+ args:
+ num_few_shot: 10
+ name: HellaSwag (10-Shot)
+ split: validation
+ type: hellaswag
+ metrics:
+ - name: normalized accuracy
+ type: acc_norm
+ value: 84.84
+ source:
+ name: Open LLM Leaderboard
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
+ task:
+ name: Text Generation
+ type: text-generation
+ - dataset:
+ args:
+ num_few_shot: 5
+ config: all
+ name: MMLU (5-Shot)
+ split: test
+ type: cais/mmlu
+ metrics:
+ - name: accuracy
+ type: acc
+ value: 64.21
+ source:
+ name: Open LLM Leaderboard
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
+ task:
+ name: Text Generation
+ type: text-generation
+ - dataset:
+ args:
+ num_few_shot: 0
+ config: multiple_choice
+ name: TruthfulQA (0-shot)
+ split: validation
+ type: truthful_qa
+ metrics:
+ - type: mc2
+ value: 47.84
+ source:
+ name: Open LLM Leaderboard
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
+ task:
+ name: Text Generation
+ type: text-generation
+ - dataset:
+ args:
+ num_few_shot: 5
+ config: winogrande_xl
+ name: Winogrande (5-shot)
+ split: validation
+ type: winogrande
+ metrics:
+ - name: accuracy
+ type: acc
+ value: 78.93
+ source:
+ name: Open LLM Leaderboard
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
+ task:
+ name: Text Generation
+ type: text-generation
+ - dataset:
+ args:
+ num_few_shot: 5
+ config: main
+ name: GSM8k (5-shot)
+ split: test
+ type: gsm8k
+ metrics:
+ - name: accuracy
+ type: acc
+ value: 59.21
+ source:
+ name: Open LLM Leaderboard
+ url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Locutusque/OpenHercules-2.5-Mistral-7B
+ task:
+ name: Text Generation
+ type: text-generation
  model_creator: hydra-project
  model_name: OpenHercules-2.5-Mistral-7B
  model_type: mistral
  pipeline_tag: text-generation
- inference: false
  prompt_template: '<|im_start|>system

  {system_message}<|im_end|>
@@ -149,6 +127,23 @@ prompt_template: '<|im_start|>system

  '
  quantized_by: Suparious
+ tags:
+ - finetuned
+ - quantized
+ - 4-bit
+ - AWQ
+ - transformers
+ - pytorch
+ - mistral
+ - instruct
+ - text-generation
+ - conversational
+ - license:apache-2.0
+ - autotrain_compatible
+ - endpoints_compatible
+ - text-generation-inference
+ - finetune
+ - chatml
  ---
  # hydra-project/OpenHercules-2.5-Mistral-7B AWQ

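Usage note: the card above describes a 4-bit AWQ quant that expects the ChatML prompt format from `prompt_template`. A minimal loading sketch, assuming the AutoAWQ + transformers stack on a CUDA machine; the repo id below is a placeholder, not taken from this commit, so substitute the actual AWQ repository this README belongs to:

```python
# Minimal sketch (assumptions: autoawq and transformers are installed, a CUDA GPU
# is available, and REPO_ID points at the actual AWQ repository for this card).
from awq import AutoAWQForCausalLM
from transformers import AutoTokenizer

REPO_ID = "hydra-project/OpenHercules-2.5-Mistral-7B"  # placeholder: use the AWQ quant repo id

tokenizer = AutoTokenizer.from_pretrained(REPO_ID)
model = AutoAWQForCausalLM.from_quantized(REPO_ID, fuse_layers=True)

# ChatML prompt following the card's prompt_template; the system turn is shown in
# the diff, the user/assistant turns below are the standard ChatML continuation.
prompt = (
    "<|im_start|>system\n"
    "You are a helpful assistant.<|im_end|>\n"
    "<|im_start|>user\n"
    "Summarize what AWQ 4-bit quantization does.<|im_end|>\n"
    "<|im_start|>assistant\n"
)

input_ids = tokenizer(prompt, return_tensors="pt").input_ids.cuda()
output = model.generate(input_ids, max_new_tokens=256)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```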