Update README.md
Browse files
README.md
CHANGED
@@ -24,7 +24,7 @@ Original model: https://huggingface.co/google/Qwen2-7B
  | [Qwen2-7B-Q8_0.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q8_0.gguf) | Q8_0 | 8.1GB | 7.3817 +/- 0.04777 |
  | [Qwen2-7B-Q6_K.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q6_K.gguf) | Q6_K | 6.25GB | 7.3914 +/- 0.04776 |
  | [Qwen2-7B-Q5_K_M.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q5_K_M.gguf) | Q5_K_M | 5.44GB | 7.4067 +/- 0.04794 |
- | [Qwen2-7B-Q5_K_S.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q5_K_S.gguf) | Q5_K_S |
  | [Qwen2-7B-Q4_K_M.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q4_K_M.gguf) | Q4_K_M | 4.68GB | 7.4796 +/- 0.04856 |
  | [Qwen2-7B-Q4_K_S.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q4_K_S.gguf) | Q4_K_S | 4.46GB | 7.5221 +/- 0.04879 |
  | [Qwen2-7B-Q3_K_L.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q3_K_L.gguf) | Q3_K_L | 4.09GB | 7.6843 +/- 0.05000 |
  | [Qwen2-7B-Q8_0.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q8_0.gguf) | Q8_0 | 8.1GB | 7.3817 +/- 0.04777 |
  | [Qwen2-7B-Q6_K.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q6_K.gguf) | Q6_K | 6.25GB | 7.3914 +/- 0.04776 |
  | [Qwen2-7B-Q5_K_M.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q5_K_M.gguf) | Q5_K_M | 5.44GB | 7.4067 +/- 0.04794 |
+ | [Qwen2-7B-Q5_K_S.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q5_K_S.gguf) | Q5_K_S | 5.32GB | 7.4291 +/- 0.04822 |
  | [Qwen2-7B-Q4_K_M.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q4_K_M.gguf) | Q4_K_M | 4.68GB | 7.4796 +/- 0.04856 |
  | [Qwen2-7B-Q4_K_S.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q4_K_S.gguf) | Q4_K_S | 4.46GB | 7.5221 +/- 0.04879 |
  | [Qwen2-7B-Q3_K_L.gguf](https://huggingface.co/fedric95/Qwen2-7B-GGUF/blob/main/Qwen2-7B-Q3_K_L.gguf) | Q3_K_L | 4.09GB | 7.6843 +/- 0.05000 |