morriszms commited on
Commit
a84539c
1 Parent(s): 9ba2f5e

Upload folder using huggingface_hub

Browse files
.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ ChimeraLlama-3-8B-v2-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
37
+ ChimeraLlama-3-8B-v2-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
38
+ ChimeraLlama-3-8B-v2-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
39
+ ChimeraLlama-3-8B-v2-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
40
+ ChimeraLlama-3-8B-v2-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
41
+ ChimeraLlama-3-8B-v2-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
42
+ ChimeraLlama-3-8B-v2-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
43
+ ChimeraLlama-3-8B-v2-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
44
+ ChimeraLlama-3-8B-v2-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
45
+ ChimeraLlama-3-8B-v2-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
46
+ ChimeraLlama-3-8B-v2-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
47
+ ChimeraLlama-3-8B-v2-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
ChimeraLlama-3-8B-v2-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6996a8f925735c5dedefe95a7740b3f1c16db8a1662561ee3502360cc2a19c0e
3
+ size 3179132640
ChimeraLlama-3-8B-v2-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:29dba01d80a71f23b4aec231a7eb5cb1af3b728866b80a99213f5bae27417612
3
+ size 4321957600
ChimeraLlama-3-8B-v2-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b1ead926626f0bf7c5f6e75bab060ed9bda183aed8cf08a0e95e9193b2983901
3
+ size 4018919136
ChimeraLlama-3-8B-v2-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f3fa14c75d8bf34973d9b2b20aec171610e5541c24a9b4ad4828d55a3056d60f
3
+ size 3664500448
ChimeraLlama-3-8B-v2-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dac210c72f79c048361aad7f8cead0b2e46406d00417dadf2941070800bee24
3
+ size 4661212896
ChimeraLlama-3-8B-v2-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6d8a3d0cebe780149b4f568720d342a3cd175d73684ca871e8ef8f8de2eba21
3
+ size 4920735456
ChimeraLlama-3-8B-v2-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dc90f4989bed1c6feb40f0e6381f69edb89fb7ff7885ad09dc7e5e5805220491
3
+ size 4692670176
ChimeraLlama-3-8B-v2-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e311f969bf8926910ed9ec43c0aaf9c5d10de07683efb6ba7d81e0ff4ace2e5b
3
+ size 5599295200
ChimeraLlama-3-8B-v2-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a199909a36903dc039120d631037305bd09a7a1032b64fde41280c39f8050f22
3
+ size 5732988640
ChimeraLlama-3-8B-v2-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d9ac728158e2aec71aeb436b1291f1ad5a9cd9fa5841b4c58ecbc785b67b8b4
3
+ size 5599295200
ChimeraLlama-3-8B-v2-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd63a57a4dd5e60fe165510594b9c8a4b97242d66d7fc0a543a5fb80ffaa9b6c
3
+ size 6596007648
ChimeraLlama-3-8B-v2-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1e7be39e9feb8ef403e20b6d12a0becae472e29dec2494fdaff52a8ca73628fd
3
+ size 8540772064
README.md ADDED
@@ -0,0 +1,168 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: other
3
+ tags:
4
+ - merge
5
+ - mergekit
6
+ - lazymergekit
7
+ - TensorBlock
8
+ - GGUF
9
+ base_model: mlabonne/ChimeraLlama-3-8B-v2
10
+ model-index:
11
+ - name: ChimeraLlama-3-8B-v2
12
+ results:
13
+ - task:
14
+ type: text-generation
15
+ name: Text Generation
16
+ dataset:
17
+ name: IFEval (0-Shot)
18
+ type: HuggingFaceH4/ifeval
19
+ args:
20
+ num_few_shot: 0
21
+ metrics:
22
+ - type: inst_level_strict_acc and prompt_level_strict_acc
23
+ value: 44.69
24
+ name: strict accuracy
25
+ source:
26
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mlabonne/ChimeraLlama-3-8B-v2
27
+ name: Open LLM Leaderboard
28
+ - task:
29
+ type: text-generation
30
+ name: Text Generation
31
+ dataset:
32
+ name: BBH (3-Shot)
33
+ type: BBH
34
+ args:
35
+ num_few_shot: 3
36
+ metrics:
37
+ - type: acc_norm
38
+ value: 28.48
39
+ name: normalized accuracy
40
+ source:
41
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mlabonne/ChimeraLlama-3-8B-v2
42
+ name: Open LLM Leaderboard
43
+ - task:
44
+ type: text-generation
45
+ name: Text Generation
46
+ dataset:
47
+ name: MATH Lvl 5 (4-Shot)
48
+ type: hendrycks/competition_math
49
+ args:
50
+ num_few_shot: 4
51
+ metrics:
52
+ - type: exact_match
53
+ value: 8.31
54
+ name: exact match
55
+ source:
56
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mlabonne/ChimeraLlama-3-8B-v2
57
+ name: Open LLM Leaderboard
58
+ - task:
59
+ type: text-generation
60
+ name: Text Generation
61
+ dataset:
62
+ name: GPQA (0-shot)
63
+ type: Idavidrein/gpqa
64
+ args:
65
+ num_few_shot: 0
66
+ metrics:
67
+ - type: acc_norm
68
+ value: 4.7
69
+ name: acc_norm
70
+ source:
71
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mlabonne/ChimeraLlama-3-8B-v2
72
+ name: Open LLM Leaderboard
73
+ - task:
74
+ type: text-generation
75
+ name: Text Generation
76
+ dataset:
77
+ name: MuSR (0-shot)
78
+ type: TAUR-Lab/MuSR
79
+ args:
80
+ num_few_shot: 0
81
+ metrics:
82
+ - type: acc_norm
83
+ value: 5.25
84
+ name: acc_norm
85
+ source:
86
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mlabonne/ChimeraLlama-3-8B-v2
87
+ name: Open LLM Leaderboard
88
+ - task:
89
+ type: text-generation
90
+ name: Text Generation
91
+ dataset:
92
+ name: MMLU-PRO (5-shot)
93
+ type: TIGER-Lab/MMLU-Pro
94
+ config: main
95
+ split: test
96
+ args:
97
+ num_few_shot: 5
98
+ metrics:
99
+ - type: acc
100
+ value: 28.54
101
+ name: accuracy
102
+ source:
103
+ url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=mlabonne/ChimeraLlama-3-8B-v2
104
+ name: Open LLM Leaderboard
105
+ ---
106
+
107
+ <div style="width: auto; margin-left: auto; margin-right: auto">
108
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
109
+ </div>
110
+ <div style="display: flex; justify-content: space-between; width: 100%;">
111
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
112
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
113
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
114
+ </p>
115
+ </div>
116
+ </div>
117
+
118
+ ## mlabonne/ChimeraLlama-3-8B-v2 - GGUF
119
+
120
+ This repo contains GGUF format model files for [mlabonne/ChimeraLlama-3-8B-v2](https://huggingface.co/mlabonne/ChimeraLlama-3-8B-v2).
121
+
122
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
123
+
124
+ ## Prompt template
125
+
126
+ ```
127
+
128
+ ```
129
+
130
+ ## Model file specification
131
+
132
+ | Filename | Quant type | File Size | Description |
133
+ | -------- | ---------- | --------- | ----------- |
134
+ | [ChimeraLlama-3-8B-v2-Q2_K.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q2_K.gguf) | Q2_K | 2.961 GB | smallest, significant quality loss - not recommended for most purposes |
135
+ | [ChimeraLlama-3-8B-v2-Q3_K_S.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q3_K_S.gguf) | Q3_K_S | 3.413 GB | very small, high quality loss |
136
+ | [ChimeraLlama-3-8B-v2-Q3_K_M.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q3_K_M.gguf) | Q3_K_M | 3.743 GB | very small, high quality loss |
137
+ | [ChimeraLlama-3-8B-v2-Q3_K_L.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q3_K_L.gguf) | Q3_K_L | 4.025 GB | small, substantial quality loss |
138
+ | [ChimeraLlama-3-8B-v2-Q4_0.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q4_0.gguf) | Q4_0 | 4.341 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
139
+ | [ChimeraLlama-3-8B-v2-Q4_K_S.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q4_K_S.gguf) | Q4_K_S | 4.370 GB | small, greater quality loss |
140
+ | [ChimeraLlama-3-8B-v2-Q4_K_M.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q4_K_M.gguf) | Q4_K_M | 4.583 GB | medium, balanced quality - recommended |
141
+ | [ChimeraLlama-3-8B-v2-Q5_0.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q5_0.gguf) | Q5_0 | 5.215 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
142
+ | [ChimeraLlama-3-8B-v2-Q5_K_S.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q5_K_S.gguf) | Q5_K_S | 5.215 GB | large, low quality loss - recommended |
143
+ | [ChimeraLlama-3-8B-v2-Q5_K_M.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q5_K_M.gguf) | Q5_K_M | 5.339 GB | large, very low quality loss - recommended |
144
+ | [ChimeraLlama-3-8B-v2-Q6_K.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q6_K.gguf) | Q6_K | 6.143 GB | very large, extremely low quality loss |
145
+ | [ChimeraLlama-3-8B-v2-Q8_0.gguf](https://huggingface.co/tensorblock/ChimeraLlama-3-8B-v2-GGUF/tree/main/ChimeraLlama-3-8B-v2-Q8_0.gguf) | Q8_0 | 7.954 GB | very large, extremely low quality loss - not recommended |
146
+
147
+
148
+ ## Downloading instruction
149
+
150
+ ### Command line
151
+
152
+ Firstly, install Huggingface Client
153
+
154
+ ```shell
155
+ pip install -U "huggingface_hub[cli]"
156
+ ```
157
+
158
+ Then, download the individual model file to a local directory
159
+
160
+ ```shell
161
+ huggingface-cli download tensorblock/ChimeraLlama-3-8B-v2-GGUF --include "ChimeraLlama-3-8B-v2-Q2_K.gguf" --local-dir MY_LOCAL_DIR
162
+ ```
163
+
164
+ If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:
165
+
166
+ ```shell
167
+ huggingface-cli download tensorblock/ChimeraLlama-3-8B-v2-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
168
+ ```