Upload glm-4-9b-chat-1m-IQ2_M.gguf with huggingface_hub
- .gitattributes +1 -0
- glm-4-9b-chat-1m-IQ2_M.gguf +3 -0
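The commit message indicates the file was pushed with the huggingface_hub library. A minimal sketch of how such an upload can be made; the repo_id and local path below are placeholders, not taken from this page:

```python
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var

# Upload the quantized GGUF; this single commit also carries the matching
# .gitattributes LFS rule, as shown in the diff below.
api.upload_file(
    path_or_fileobj="glm-4-9b-chat-1m-IQ2_M.gguf",   # local file to upload
    path_in_repo="glm-4-9b-chat-1m-IQ2_M.gguf",      # destination path in the repo
    repo_id="your-username/glm-4-9b-chat-1m-GGUF",   # placeholder repo id
    repo_type="model",
    commit_message="Upload glm-4-9b-chat-1m-IQ2_M.gguf with huggingface_hub",
)
```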
.gitattributes
CHANGED
@@ -55,3 +55,4 @@ glm-4-9b-chat-1m-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
 glm-4-9b-chat-1m-IQ3_XS.gguf filter=lfs diff=lfs merge=lfs -text
 glm-4-9b-chat-1m-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
 glm-4-9b-chat-1m-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+glm-4-9b-chat-1m-IQ2_M.gguf filter=lfs diff=lfs merge=lfs -text
glm-4-9b-chat-1m-IQ2_M.gguf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5affa2a0ee648922db2a630dbe02b7e88b7ba0a4ec686b36b4ee24e9061ef32e
+size 3958493984
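The new .gguf entry added to the repo is a Git LFS pointer file rather than the binary itself: `oid sha256:<hex>` is the SHA-256 digest of the actual file and `size` is its length in bytes. A small sketch (the local path is a placeholder) of how a downloaded copy can be checked against this pointer:

```python
import hashlib
import os

path = "glm-4-9b-chat-1m-IQ2_M.gguf"  # placeholder: local copy of the downloaded file

# The pointer's `size` field is the file length in bytes.
assert os.path.getsize(path) == 3958493984

# The pointer's `oid sha256:<hex>` field is the SHA-256 digest of the file contents.
digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        digest.update(chunk)
assert digest.hexdigest() == "5affa2a0ee648922db2a630dbe02b7e88b7ba0a4ec686b36b4ee24e9061ef32e"
```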