morriszms committed
Commit
8372405
1 Parent(s): 5cd2207

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,15 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q4_0.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q5_0.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ granite-34b-code-base-8k-Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,163 @@
+ ---
+ pipeline_tag: text-generation
+ inference: true
+ license: apache-2.0
+ datasets:
+ - codeparrot/github-code-clean
+ - bigcode/starcoderdata
+ - open-web-math/open-web-math
+ - math-ai/StackMathQA
+ metrics:
+ - code_eval
+ library_name: transformers
+ tags:
+ - code
+ - granite
+ - TensorBlock
+ - GGUF
+ base_model: ibm-granite/granite-34b-code-base-8k
+ model-index:
+ - name: granite-34b-code-base-8k
+   results:
+   - task:
+       type: text-generation
+     dataset:
+       name: MBPP
+       type: mbpp
+     metrics:
+     - type: pass@1
+       value: 47.2
+       name: pass@1
+   - task:
+       type: text-generation
+     dataset:
+       name: MBPP+
+       type: evalplus/mbppplus
+     metrics:
+     - type: pass@1
+       value: 53.1
+       name: pass@1
+   - task:
+       type: text-generation
+     dataset:
+       name: HumanEvalSynthesis(Python)
+       type: bigcode/humanevalpack
+     metrics:
+     - type: pass@1
+       value: 48.2
+       name: pass@1
+     - type: pass@1
+       value: 54.9
+       name: pass@1
+     - type: pass@1
+       value: 61.6
+       name: pass@1
+     - type: pass@1
+       value: 40.2
+       name: pass@1
+     - type: pass@1
+       value: 50.0
+       name: pass@1
+     - type: pass@1
+       value: 39.6
+       name: pass@1
+     - type: pass@1
+       value: 42.7
+       name: pass@1
+     - type: pass@1
+       value: 26.2
+       name: pass@1
+     - type: pass@1
+       value: 47.0
+       name: pass@1
+     - type: pass@1
+       value: 26.8
+       name: pass@1
+     - type: pass@1
+       value: 36.6
+       name: pass@1
+     - type: pass@1
+       value: 25.0
+       name: pass@1
+     - type: pass@1
+       value: 20.1
+       name: pass@1
+     - type: pass@1
+       value: 30.5
+       name: pass@1
+     - type: pass@1
+       value: 40.9
+       name: pass@1
+     - type: pass@1
+       value: 34.1
+       name: pass@1
+     - type: pass@1
+       value: 39.0
+       name: pass@1
+     - type: pass@1
+       value: 12.2
+       name: pass@1
+ ---
+
+ <div style="width: auto; margin-left: auto; margin-right: auto">
+ <img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
+ </div>
+ <div style="display: flex; justify-content: space-between; width: 100%;">
+ <div style="display: flex; flex-direction: column; align-items: flex-start;">
+ <p style="margin-top: 0.5em; margin-bottom: 0em;">
+ Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
+ </p>
+ </div>
+ </div>
+
+ ## ibm-granite/granite-34b-code-base-8k - GGUF
+
+ This repo contains GGUF format model files for [ibm-granite/granite-34b-code-base-8k](https://huggingface.co/ibm-granite/granite-34b-code-base-8k).
+
+ The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).
+
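+ As a quick sanity check after downloading (see instructions below), you can run one of these files directly with llama.cpp's `llama-cli`. This is a minimal sketch, assuming a llama.cpp build at or after the commit above; the quant choice, prompt, and token count are illustrative, not part of the upstream instructions:
+
+ ```shell
+ # Complete a plain-text code prompt with the Q4_K_M quant at the model's full 8K context
+ ./llama-cli -m granite-34b-code-base-8k-Q4_K_M.gguf -c 8192 -n 128 -p "def quicksort(arr):"
+ ```
+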
+ ## Prompt template
+
+ ```
+
+ ```
+
+ ## Model file specification
+
+ | Filename | Quant type | File Size | Description |
+ | -------- | ---------- | --------- | ----------- |
+ | [granite-34b-code-base-8k-Q2_K.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q2_K.gguf) | Q2_K | 12.207 GB | smallest, significant quality loss - not recommended for most purposes |
+ | [granite-34b-code-base-8k-Q3_K_S.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q3_K_S.gguf) | Q3_K_S | 13.791 GB | very small, high quality loss |
+ | [granite-34b-code-base-8k-Q3_K_M.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q3_K_M.gguf) | Q3_K_M | 16.361 GB | very small, high quality loss |
+ | [granite-34b-code-base-8k-Q3_K_L.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q3_K_L.gguf) | Q3_K_L | 18.207 GB | small, substantial quality loss |
+ | [granite-34b-code-base-8k-Q4_0.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q4_0.gguf) | Q4_0 | 17.917 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
+ | [granite-34b-code-base-8k-Q4_K_S.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q4_K_S.gguf) | Q4_K_S | 18.110 GB | small, greater quality loss |
+ | [granite-34b-code-base-8k-Q4_K_M.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q4_K_M.gguf) | Q4_K_M | 19.915 GB | medium, balanced quality - recommended |
+ | [granite-34b-code-base-8k-Q5_0.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q5_0.gguf) | Q5_0 | 21.800 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
+ | [granite-34b-code-base-8k-Q5_K_S.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q5_K_S.gguf) | Q5_K_S | 21.800 GB | large, low quality loss - recommended |
+ | [granite-34b-code-base-8k-Q5_K_M.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q5_K_M.gguf) | Q5_K_M | 23.050 GB | large, very low quality loss - recommended |
+ | [granite-34b-code-base-8k-Q6_K.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q6_K.gguf) | Q6_K | 25.926 GB | very large, extremely low quality loss |
+ | [granite-34b-code-base-8k-Q8_0.gguf](https://huggingface.co/tensorblock/granite-34b-code-base-8k-GGUF/tree/main/granite-34b-code-base-8k-Q8_0.gguf) | Q8_0 | 33.518 GB | very large, extremely low quality loss - not recommended |
+
+ ## Downloading instructions
+
+ ### Command line
+
+ First, install the Hugging Face Hub CLI:
+
+ ```shell
+ pip install -U "huggingface_hub[cli]"
+ ```
+
+ Then, download an individual model file to a local directory:
+
+ ```shell
+ huggingface-cli download tensorblock/granite-34b-code-base-8k-GGUF --include "granite-34b-code-base-8k-Q2_K.gguf" --local-dir MY_LOCAL_DIR
+ ```
+
+ If you want to download multiple model files matching a pattern (e.g., `*Q4_K*gguf`), you can run:
+
+ ```shell
+ huggingface-cli download tensorblock/granite-34b-code-base-8k-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
+ ```
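+
+ To verify a download, you can compare its SHA-256 hash with the `oid` recorded in the corresponding Git LFS pointer in this commit (shown further down this page). A minimal sketch for the Q2_K file, assuming the standard coreutils `sha256sum` is available:
+
+ ```shell
+ # The expected hash is the oid from the granite-34b-code-base-8k-Q2_K.gguf LFS pointer below
+ sha256sum MY_LOCAL_DIR/granite-34b-code-base-8k-Q2_K.gguf
+ # expected: b716f14e667a4a5c7af23a227de7b8489877ff97586c3bb825a37b574a0d2924
+ ```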
granite-34b-code-base-8k-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b716f14e667a4a5c7af23a227de7b8489877ff97586c3bb825a37b574a0d2924
+ size 13107021280
granite-34b-code-base-8k-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:666086c36d2e4db351ff2b8091ec87a2043053ef718b0b61127b724e3f443a73
+ size 19549668832
granite-34b-code-base-8k-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59536115c52f3fd162e37d2310c090dfa5517905fdd3c0c18da6c4c365890fb8
+ size 17567860192
granite-34b-code-base-8k-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed3035b0207218547d8a22a7083ef67490bda5820ddf80966f4e52ba1d0e57b0
+ size 14807975392
granite-34b-code-base-8k-Q4_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b841cc79f4de50dae0da9923f11c1da25d251586d85cb177ac42aecf2e9a196e
+ size 19238241760
granite-34b-code-base-8k-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2eecb174f654015b6b5eec5dbb5b0b857300fc4eaa3320002f2821064aae47c8
+ size 21383628256
granite-34b-code-base-8k-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:64ffd15c170db732e93e264954d8c03a09734a91a9b0837b85ee849a63f7c006
+ size 19445859808
granite-34b-code-base-8k-Q5_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5ab72164b6617dd9252f5a0358d2091dd45c999a77445c25249e3a0d41f2b5f4
+ size 23407904224
granite-34b-code-base-8k-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9b9fc16ac622ac8ab2529b91c32006e8d618ab7d5f25b1039efcd6356437f20
+ size 24749852128
granite-34b-code-base-8k-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:971472149f9dd3265501a0dd845d9ca2650087457644261fc73eaefd7b28b30f
+ size 23407904224
granite-34b-code-base-8k-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66a405c23ee5b877855415969d610ee1cc2e9c5bf3344b76a355738f7f0e8c82
+ size 27838170592
granite-34b-code-base-8k-Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2741b2ee9e0de5c320f73ebf4a1e38b9be096fe00af8254b3779089eb719ce6a
+ size 35990029792