Convert TinyLlama/TinyLlama-1.1B-Chat-v1.0 to GGUF weights using llamafile-quantize
commit 1d9fa85f0c136d81c6684484c05582e3f4801b21
Files changed:
- .gitattributes +1 -0
- TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf +3 -0
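For reference, a minimal sketch of the conversion step named in the commit message. It assumes llamafile-quantize follows the llama.cpp quantize convention of positional arguments (input GGUF, output GGUF, quantization type); the f16 input filename is hypothetical and not part of this commit.

```python
import subprocess

# Sketch only: assumes llamafile-quantize mirrors llama.cpp's quantize CLI
# (positional input GGUF, output GGUF, quant type). The f16 input name below
# is hypothetical; this commit only adds the Q3_K_L output file.
subprocess.run(
    [
        "llamafile-quantize",
        "TinyLlama-1.1B-Chat-v1.0.f16.gguf",      # hypothetical full-precision input
        "TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf",   # quantized output added in this commit
        "Q3_K_L",                                 # 3-bit K-quant, large variant
    ],
    check=True,
)
```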
.gitattributes CHANGED
@@ -34,3 +34,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 TinyLlama-1.1B-Chat-v1.0.Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
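The added .gitattributes line routes the new Q3_K_L file through Git LFS rather than storing the weights directly in the repository. A quick way to confirm the pattern matches is git's own check-attr command (sketch; run inside a clone of this repo):

```python
import subprocess

# Ask git which attributes apply to the new file; with the line above in
# .gitattributes, filter/diff/merge should all report "lfs".
out = subprocess.run(
    ["git", "check-attr", "filter", "diff", "merge", "--",
     "TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf"],
    capture_output=True, text=True, check=True,
)
print(out.stdout)
# Expected, roughly:
# TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf: filter: lfs
# TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf: diff: lfs
# TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf: merge: lfs
```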
TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:70309e7f5eef49bfb1930e939c56d704847da678b53151965560f48e038c1165
+size 592500064
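What was committed is only a Git LFS pointer: the spec version line, the SHA-256 of the actual weights, and their size in bytes (about 592 MB). A small sketch for verifying a downloaded copy of the GGUF against those pointer values (the local filename is assumed to match the repo filename):

```python
import hashlib
import os

PATH = "TinyLlama-1.1B-Chat-v1.0.Q3_K_L.gguf"
EXPECTED_OID = "70309e7f5eef49bfb1930e939c56d704847da678b53151965560f48e038c1165"
EXPECTED_SIZE = 592500064  # bytes, from the LFS pointer above

# Hash the file in 1 MiB chunks so the full model never has to fit in memory.
h = hashlib.sha256()
with open(PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)

assert os.path.getsize(PATH) == EXPECTED_SIZE, "size mismatch with LFS pointer"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch with LFS pointer"
print("GGUF matches the LFS pointer (size and sha256).")
```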