Best q8 conversion, quantized directly from bf16, with slightly better perplexity than f16-based quants
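A minimal sketch of the kind of workflow the title describes, driving llama.cpp's CLI tools from Python. The exact commands and perplexity test set are not recorded in this commit, so the flags and the wikitext file below are illustrative assumptions; only the two GGUF file names come from this repo.

import subprocess

# Quantize the bf16 source GGUF directly to Q8_0, skipping an f16 intermediate.
# (Binary names follow current llama.cpp; older builds ship "quantize"/"perplexity".)
subprocess.run(
    ["llama-quantize", "qwen7bv2instruct_bf16.gguf",
     "qwen7bv2instruct_q8.gguf", "Q8_0"],
    check=True,
)

# Measure perplexity on a reference text (lower is better); the test file here
# is an illustrative placeholder, not something recorded in this commit.
subprocess.run(
    ["llama-perplexity", "-m", "qwen7bv2instruct_q8.gguf",
     "-f", "wiki.test.raw"],
    check=True,
)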
.gitattributes
CHANGED
@@ -54,3 +54,4 @@ qwen7bv2instruct_bf16.gguf filter=lfs diff=lfs merge=lfs -text
 qwen7bv2inst_iq4xs_embedding8_output8.gguf filter=lfs diff=lfs merge=lfs -text
 qwen7bv2inst_iq4xs_embedding8_outputq8.gguf filter=lfs diff=lfs merge=lfs -text
 qwen7bv2inst_Iq4xs_output6k.gguf filter=lfs diff=lfs merge=lfs -text
+qwen7bv2instruct_q8.gguf filter=lfs diff=lfs merge=lfs -text
qwenv2instruct7b_q8.gguf → qwen7bv2instruct_q8.gguf
RENAMED
File without changes