LoneStriker committed
Commit 595aa6f
1 Parent(s): f1c0009

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -1,35 +1,8 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q3_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q3_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q4_K_S.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ CodeLlama-70b-Instruct-hf-Q5_K_S.gguf filter=lfs diff=lfs merge=lfs -text
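
Each removed line above was a stock Hugging Face LFS pattern; the added lines track only this repo's eight GGUF quants. For context, entries in this format are normally appended by `git lfs track` rather than written by hand; a minimal sketch (the glob pattern is illustrative, not from the commit):

```bash
# Appends a line like "*.gguf filter=lfs diff=lfs merge=lfs -text"
# to .gitattributes, matching the format of the lines added above.
git lfs track "*.gguf"
```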
CodeLlama-70b-Instruct-hf-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79df80954053dd7dab895b990e9cf0137fab92ae38f3a02a43fc626a0a38cb06
+ size 25462587840
CodeLlama-70b-Instruct-hf-Q3_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b7e35f0e89db33a34cb738c17cc828507e088ee1623ab4d27d57576e23e08407
+ size 36148000192
CodeLlama-70b-Instruct-hf-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46fa7c7497f8551c774ea6a832843d40c7f7041909f648178eb88a732f9c13cf
+ size 33274901952
CodeLlama-70b-Instruct-hf-Q3_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9288e6ab63da15d21fd05941d642c53a49cae2343f3c351bdbace55e8d9fc50f
+ size 29919458752
CodeLlama-70b-Instruct-hf-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de9558512f2b26b82be73c50ad4bdc2578403a787a76c8dda455f6683269b299
+ size 41423092160
CodeLlama-70b-Instruct-hf-Q4_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e75023e94f2e991a6f853ae94db391bf9048a40f4a0f9778f36797bd00589f0e
+ size 39249918400
CodeLlama-70b-Instruct-hf-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c35f289b7da7a9b5d91b535bd9b48e46ea6608fff7792b40a3766bc38ea5ed4f
+ size 48753965504
CodeLlama-70b-Instruct-hf-Q5_K_S.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a49836e92363a2f24d28dda4b9fc3d8dad9e39260d629e69e3dfb17538d61a57
+ size 47461595584
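
The eight LFS pointers above are k-quant GGUF builds of the 70B Instruct model, from Q2_K (~25.5 GB) to Q5_K_M (~48.8 GB). As a hedged sketch of how such a file is typically used, a recent llama.cpp build can load it directly (the `llama-cli` binary name and flags assume a current llama.cpp checkout; the layer-offload count depends on your GPU):

```bash
# Sketch only: run the Q4_K_M quant with llama.cpp.
# -c sets the context window, -n the number of tokens to generate,
# -ngl the number of layers offloaded to the GPU (tune for your hardware).
./llama-cli -m CodeLlama-70b-Instruct-hf-Q4_K_M.gguf \
  -p "Write a Python function that returns the n-th Fibonacci number." \
  -c 4096 -n 256 -ngl 40
```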
README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ language:
+ - code
+ pipeline_tag: text-generation
+ tags:
+ - llama-2
+ license: llama2
+ ---
+ # **Code Llama**
+ Code Llama is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 70B instruct-tuned version in the Hugging Face Transformers format. This model is designed for general code synthesis and understanding. Links to other models can be found in the index at the bottom.
+
+ |     | Base Model | Python | Instruct |
+ | --- | --- | --- | --- |
+ | 7B  | [codellama/CodeLlama-7b-hf](https://huggingface.co/codellama/CodeLlama-7b-hf) | [codellama/CodeLlama-7b-Python-hf](https://huggingface.co/codellama/CodeLlama-7b-Python-hf) | [codellama/CodeLlama-7b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-7b-Instruct-hf) |
+ | 13B | [codellama/CodeLlama-13b-hf](https://huggingface.co/codellama/CodeLlama-13b-hf) | [codellama/CodeLlama-13b-Python-hf](https://huggingface.co/codellama/CodeLlama-13b-Python-hf) | [codellama/CodeLlama-13b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-13b-Instruct-hf) |
+ | 34B | [codellama/CodeLlama-34b-hf](https://huggingface.co/codellama/CodeLlama-34b-hf) | [codellama/CodeLlama-34b-Python-hf](https://huggingface.co/codellama/CodeLlama-34b-Python-hf) | [codellama/CodeLlama-34b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-34b-Instruct-hf) |
+ | 70B | [codellama/CodeLlama-70b-hf](https://huggingface.co/codellama/CodeLlama-70b-hf) | [codellama/CodeLlama-70b-Python-hf](https://huggingface.co/codellama/CodeLlama-70b-Python-hf) | [codellama/CodeLlama-70b-Instruct-hf](https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf) |
+
+ ## Model Use
+
+ Install `transformers`:
+
+ ```bash
+ pip install transformers accelerate
+ ```
+
+ **Warning:** The 70B Instruct model has a different prompt template than the smaller versions. We'll update this repo soon.
+
+ Model capabilities:
+
+ - [x] Code completion.
+ - [ ] Infilling.
+ - [x] Instructions / chat.
+ - [ ] Python specialist.
+
+ ## Model Details
+ *Note: Use of this model is governed by the Meta license.* Meta developed and publicly released the Code Llama family of large language models (LLMs).
+
+ **Model Developers** Meta
+
+ **Variations** Code Llama comes in four model sizes and three variants:
+
+ * Code Llama: base models designed for general code synthesis and understanding
+ * Code Llama - Python: designed specifically for Python
+ * Code Llama - Instruct: for instruction following and safer deployment
+
+ All variants are available in sizes of 7B, 13B, 34B, and 70B parameters.
+
+ **This repository contains the Instruct version of the 70B-parameter model.**
+
+ **Input** Models input text only.
+
+ **Output** Models generate text only.
+
+ **Model Architecture** Code Llama is an auto-regressive language model that uses an optimized transformer architecture. It was fine-tuned with up to 16k tokens. This variant **does not** support long context of up to 100k tokens.
+
+ **Model Dates** Code Llama and its variants have been trained between January 2023 and January 2024.
+
+ **Status** This is a static model trained on an offline dataset. Future versions of Code Llama - Instruct will be released as we improve model safety with community feedback.
+
+ **License** A custom commercial license is available at: [https://ai.meta.com/resources/models-and-libraries/llama-downloads/](https://ai.meta.com/resources/models-and-libraries/llama-downloads/)
+
+ **Research Paper** More information can be found in the paper "[Code Llama: Open Foundation Models for Code](https://ai.meta.com/research/publications/code-llama-open-foundation-models-for-code/)" or its [arXiv page](https://arxiv.org/abs/2308.12950).
+
+ ## Intended Use
+ **Intended Use Cases** Code Llama and its variants are intended for commercial and research use in English and relevant programming languages. The base model Code Llama can be adapted for a variety of code synthesis and understanding tasks, Code Llama - Python is designed specifically to handle the Python programming language, and Code Llama - Instruct is intended to be safer to use for code assistant and generation applications.
+
+ **Out-of-Scope Uses** Use in any manner that violates applicable laws or regulations (including trade compliance laws). Use in languages other than English. Use in any other way that is prohibited by the Acceptable Use Policy and Licensing Agreement for Code Llama and its variants.
+
+ ## Hardware and Software
+ **Training Factors** We used custom training libraries. The training and fine-tuning of the released models were performed on Meta’s Research Super Cluster.
+
+ **Carbon Footprint** In aggregate, training all 12 Code Llama models required 1400K GPU hours of computation on hardware of type A100-80GB (TDP of 350-400W). Estimated total emissions were 228.55 tCO2eq, 100% of which were offset by Meta’s sustainability program.
+
+ ## Evaluation Results
+
+ See evaluations for the main models and detailed ablations in Section 3 and safety evaluations in Section 4 of the research paper.
+
+ ## Ethical Considerations and Limitations
+
+ Code Llama and its variants are a new technology that carries risks with use. Testing conducted to date has been in English and has not covered, nor could it cover, all scenarios. For these reasons, as with all LLMs, Code Llama’s potential outputs cannot be predicted in advance, and the model may in some instances produce inaccurate or objectionable responses to user prompts. Therefore, before deploying any applications of Code Llama, developers should perform safety testing and tuning tailored to their specific applications of the model.
+
+ Please see the Responsible Use Guide available at [https://ai.meta.com/llama/responsible-use-guide](https://ai.meta.com/llama/responsible-use-guide).
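
The README's Model Use section stops after `pip install`. A minimal, hedged sketch of loading this checkpoint with `transformers` follows (the model id comes from the table above; `device_map="auto"`, fp16, and the presence of a chat template in the repo are assumptions, and the 70B weights require substantial GPU memory):

```python
# Sketch, not part of the commit: load CodeLlama-70b-Instruct-hf and generate.
# apply_chat_template is used so the 70B-specific prompt format the README
# warns about is handled by the repo's template, assuming one is shipped.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "codellama/CodeLlama-70b-Instruct-hf"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

messages = [{"role": "user", "content": "Write a function that checks if a number is prime."}]
input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt").to(model.device)
output = model.generate(input_ids, max_new_tokens=256)
# Decode only the newly generated tokens, skipping the prompt.
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```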
huggingface-metadata.txt ADDED
@@ -0,0 +1,63 @@
+ url: https://huggingface.co/codellama/CodeLlama-70b-Instruct-hf
+ branch: main
+ download date: 2024-01-29 12:30:41
+ sha256sum:
+ 4004c3d5888e6ce39e5f7210cfd4f1041cfa9e7b98b179189c66ff376817d132 model-00001-of-00029.safetensors
+ 292a56743606d7817854f557d4aa03dc290781194de6b29abf4a79289408176a model-00002-of-00029.safetensors
+ 61e6c87742d3573e8888ca82bbd636379a90622118446b351024e311ef7127d9 model-00003-of-00029.safetensors
+ bd23b4c551f19c4e5efd4b27234f09e55c43a2983a8373b6a17dab71995a797c model-00004-of-00029.safetensors
+ bfff1ca13c2820b277be89462ffb691914ba606e7971f1de318bb424bf06f476 model-00005-of-00029.safetensors
+ 603c5c7049ac80d8c336cab433fa3db35044a9aaa0f55a3a86a55db192062224 model-00006-of-00029.safetensors
+ dca4afa281317faf74c0a49057e5228133c6638c8bd247883cb7ed04c5ad41f6 model-00007-of-00029.safetensors
+ 7b79ae57c40efe6856a24aef51b16829b37907d1d8b71a39c315f350d0170414 model-00008-of-00029.safetensors
+ 1662aff5cffe43817ca3bc1a892230d2f20cf5043c983a20005710d3ebe554c2 model-00009-of-00029.safetensors
+ 726e1ac7355be1a13c4d1767b34fa280f5a84863bedbaa914d8c9e723a29299a model-00010-of-00029.safetensors
+ a885ac631e76cafaf2873cf7b981733515080104a14cbd79d9023a29fe025d0a model-00011-of-00029.safetensors
+ 2c280eaf1264474081c067086966ed8d6bb0d675cd900d2f7684b575ee986ada model-00012-of-00029.safetensors
+ 137fa39c941d33e250629c49d2508a28ffc90d2f25f9ba6664ff2d21fe5908ce model-00013-of-00029.safetensors
+ 05bb8d3b6d36e86dcdf89723a8e56582928cb0c1439cddcebc7956d18c1c386d model-00014-of-00029.safetensors
+ 4202741df46eef1368120a5e2a43f317ac0fa1a57612b97024ee719a057b83e2 model-00015-of-00029.safetensors
+ e4a51d3dd123fa55e3db71398195d0d1fa6a45434a4390f97ea719db3deab399 model-00016-of-00029.safetensors
+ c594a072d0fba4feaf78468ef3be0323b621ecd8889729f6f935d0327f7eda06 model-00017-of-00029.safetensors
+ c9decc05bed8faecaf68866d56b561b00b6685756e42b1cafaf2154bafe9748f model-00018-of-00029.safetensors
+ d8e066597d400c6a3044b1a01b44a0ac3692c259e60e7df35d0184d968fb33f2 model-00019-of-00029.safetensors
+ 6d34ebc19cdbea766d324ae9ba6b5438c749658972db36fc243d4590883cda9b model-00020-of-00029.safetensors
+ 33c3d992bffcd3c0226be2ef5e7b8a51b13ac9c8d9fd9a1eaea9278cfa5e9491 model-00021-of-00029.safetensors
+ 08f604ab4fc99fa2e9105c0f213e12076f40d8a79a2bbdb12ad4bf55255a358f model-00022-of-00029.safetensors
+ 9315d19913c704ba028a96bcd0cf3504e90c1bb2b01f186c27a46b8ba6b13f16 model-00023-of-00029.safetensors
+ 5d737747c440e1de8a90a9bcce61842a651b9715c90d37c154a5c30ff282a41e model-00024-of-00029.safetensors
+ d0f8b5d80575db4750d331dc59d0b314a364e82b4df01fe1cd839742c7c26f5d model-00025-of-00029.safetensors
+ d4fad963661b511c2384c283117ee473d2c7a9565dbed0a9f20e1856737e88d4 model-00026-of-00029.safetensors
+ a41bc9a66524cd90cc365216385c43e1d01aab1c7228d6d9160db90960bd0450 model-00027-of-00029.safetensors
+ d08a5e11252d8d27dd82f45f39f341daa3d10791edfa663be10c251e5d93f046 model-00028-of-00029.safetensors
+ 60c2987c400296194d45bac0d581f87c3c064fb9b1bff9fdfc153ecaa684ce30 model-00029-of-00029.safetensors
+ 555c88c89d64f969f2f3c62430916e3fb76802c41ae286bd315bdb3e184d9c83 pytorch_model-00001-of-00029.bin
+ a069f184aab63ce3f116ba8383612267c46f92ef63453fdb4f97599e66246ccd pytorch_model-00002-of-00029.bin
+ ba8ffc01c22f824bde38a20ee1daf28ec730fc1afd6875671f5e8d0f499e1edb pytorch_model-00003-of-00029.bin
+ 91d5d5285d0e2b80c49611cc9374e2715638b0cda9048222bcdf040165e9ae0f pytorch_model-00004-of-00029.bin
+ 43a1562e34afffe1940f191a3eeb2b557b284aff4880cece439487b35ea4a45d pytorch_model-00005-of-00029.bin
+ 603c8dec52370bac4ca1ecdb5625a13b9603c5a20198181241a1fe943d1063b1 pytorch_model-00006-of-00029.bin
+ b634b279e3a8579110a8c7509fe813965900afaa4aaaf4d0139b01e2d7818a83 pytorch_model-00007-of-00029.bin
+ f9d20d25cf37021ff5572b1906b37e356f8cdbba772d40a0eeda8ffb0ac27f73 pytorch_model-00008-of-00029.bin
+ 10e3697c38982fdb0c56e6e022a215a3e6673c5d073ce8b6c4b91e3369608c50 pytorch_model-00009-of-00029.bin
+ ea49cca3be9c5b5235e59aa4eef8edbbd6f86414e6acf667431e68206ce250b9 pytorch_model-00010-of-00029.bin
+ 1d9905f1df7c725cede45b212c801c2820410375d3c145626b1cd3bcc0388b7b pytorch_model-00011-of-00029.bin
+ 254723a6b37a0a194bb5256898897b6ab594b61d1fd60b932565f6d7e73a7fc4 pytorch_model-00012-of-00029.bin
+ fb8e96cad83972c1153dddcf455d13378ffcc5efdee0ae32af62ae917e59f4d8 pytorch_model-00013-of-00029.bin
+ c97ce45487202b5d455a22121a137d194501872576d1c97bab50519eb6672875 pytorch_model-00014-of-00029.bin
+ 7f924e79868a6bab5eeac6b9f12d7f74eb6f122a20b1ac2df9a11433982449ba pytorch_model-00015-of-00029.bin
+ e8f571c8016711bee8c0eeb2741e2730d9032c0eefef343e895e376e8b39a6be pytorch_model-00016-of-00029.bin
+ 2e4040e65d15558b0c09e62fdf7a80e5d1ce6b3ae54ccda4086a903053b44df9 pytorch_model-00017-of-00029.bin
+ 8f77aff8e2049f07c114ef246f25b2dce28217dad6dbb85afbabfe8b31d00207 pytorch_model-00018-of-00029.bin
+ 9505ab17ebe35c84208ff27ddd424f017db272864c43d8b31a5bc012381ff3b5 pytorch_model-00019-of-00029.bin
+ 4e74de256316e0415034b81d739a3d0c3404c06188f78cd7f14d3893206b215d pytorch_model-00020-of-00029.bin
+ dafd09f3237a55064e79d73549538794dc0059e8fd7e604f368f5849457a62cf pytorch_model-00021-of-00029.bin
+ 963968112411bffc3d1c9f3ae7573b73f1032d9ee8b1b60b22bc8bc219fde903 pytorch_model-00022-of-00029.bin
+ 121a2abc33a183fd8d4a65280228af09d277a79b04dd0369c7a6b8f85e23569b pytorch_model-00023-of-00029.bin
+ 61b6119c0fbe0410f519021487f51a7526b2b51aa152a567735e8934bd22f271 pytorch_model-00024-of-00029.bin
+ 4d2a306b9dc55a03ca0f250bc25cc9e0c87aea0b8e352fd3e5517e9d021917b0 pytorch_model-00025-of-00029.bin
+ d195d8b4bf3e4b4fe266cc55a3ff2a8b1337f9f808108e637dd21254b83be368 pytorch_model-00026-of-00029.bin
+ 06df3e01915ad034b7e62658e159e6e2c87b5f12643676a38ea58e772cacd78f pytorch_model-00027-of-00029.bin
+ 856d7a5d32b3d227bdf3eb2dd7d43911af33f4be14ba7f7a6a0f027adaf3e226 pytorch_model-00028-of-00029.bin
+ 2dd33e71be1522707b2a70db7f7aa608166f89ae4f9e26037358165c7bfd40a7 pytorch_model-00029-of-00029.bin
+ 99049b351301fb75b3b0587a484b675cbfd51abe27d2b92eabd385e4c41f97e9 tokenizer.model
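
The `sha256sum:` list above, like the `oid sha256:` field in each GGUF pointer, lets downloads be verified; a hedged one-liner assuming GNU coreutils, using the Q2_K hash recorded in this commit:

```bash
# Compute the digest and compare it against the oid in the LFS pointer
# (or the matching huggingface-metadata.txt entry for source weights).
sha256sum CodeLlama-70b-Instruct-hf-Q2_K.gguf
# expected: 79df80954053dd7dab895b990e9cf0137fab92ae38f3a02a43fc626a0a38cb06
```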