nisten commited on
Commit
49716da
β€’
1 Parent(s): 5f78dd7

pushing cuda mlc no sharding

Browse files
README.md ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: llama2
3
+ datasets:
4
+ - glaiveai/glaive-code-assistant
5
+ language:
6
+ - en
7
+ tags:
8
+ - code
9
+ ---
10
+
11
+ # Glaive-coder-7b
12
+
13
+ Glaive-coder-7b is a 7B parameter code model trained on a dataset of ~140k programming-related problems and solutions generated from Glaive’s synthetic data generation platform.
14
+
15
+ The model is fine-tuned on the CodeLlama-7b model.
16
+
17
+ ## Usage:
18
+
19
+ The model is trained to act as a code assistant, and can do both single instruction following and multi-turn conversations.
20
+ It follows the same prompt format as CodeLlama-7b-Instruct:
21
+ ```
22
+ <s>[INST]
23
+ <<SYS>>
24
+ {{ system_prompt }}
25
+ <</SYS>>
26
+
27
+ {{ user_msg }} [/INST] {{ model_answer }} </s>
28
+ <s>[INST] {{ user_msg }} [/INST]
29
+ ```
30
+
31
+ You can run the model in the following way:
32
+
33
+ ```python
34
+ from transformers import AutoModelForCausalLM , AutoTokenizer
35
+
36
+ tokenizer = AutoTokenizer.from_pretrained("glaiveai/glaive-coder-7b")
37
+ model = AutoModelForCausalLM.from_pretrained("glaiveai/glaive-coder-7b").half().cuda()
38
+
39
+ def fmt_prompt(prompt):
40
+ return f"<s> [INST] {prompt} [/INST]"
41
+
42
+ inputs = tokenizer(fmt_prompt(prompt),return_tensors="pt").to(model.device)
43
+
44
+ outputs = model.generate(**inputs,do_sample=True,temperature=0.1,top_p=0.95,max_new_tokens=100)
45
+
46
+ print(tokenizer.decode(outputs[0],skip_special_tokens=True,clean_up_tokenization_spaces=False))
47
+ ```
48
+
49
+ ## Benchmarks:
50
+
51
+ The model achieves a 63.1% pass@1 on HumanEval and a 45.2% pass@1 on MBPP, however it is evident that these benchmarks are not representative of real-world usage of code models so we are launching the [Code Models Arena](https://arena.glaive.ai/) to let users vote on model outputs so we can have a better understanding of user preference on code models and come up with new and better benchmarks. We plan to release the Arena results as soon as we have a sufficient amount of data.
52
+
53
+ Join the Glaive [discord](https://discord.gg/fjQ4uf3yWD) for improvement suggestions, bug-reports and collaborating on more open-source projects.
added_tokens.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ {
2
+ "<|pad|>": 32016
3
+ }
config.json ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "codellama/CodeLlama-7b-Instruct-hf",
3
+ "architectures": [
4
+ "LlamaForCausalLM"
5
+ ],
6
+ "bos_token_id": 1,
7
+ "eos_token_id": 2,
8
+ "hidden_act": "silu",
9
+ "hidden_size": 4096,
10
+ "initializer_range": 0.02,
11
+ "intermediate_size": 11008,
12
+ "max_position_embeddings": 16384,
13
+ "max_seq_len": 4096,
14
+ "model_type": "llama",
15
+ "num_attention_heads": 32,
16
+ "num_hidden_layers": 32,
17
+ "num_key_value_heads": 32,
18
+ "pretraining_tp": 1,
19
+ "rms_norm_eps": 1e-05,
20
+ "rope_scaling": null,
21
+ "rope_theta": 1000000,
22
+ "tie_word_embeddings": false,
23
+ "torch_dtype": "float32",
24
+ "transformers_version": "4.32.0",
25
+ "use_cache": true,
26
+ "vocab_size": 32017
27
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.32.0"
6
+ }
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c4c35e92e96e3493907fb7e52af0638176ee92dd1067c64d3081ed2a0f19972d
3
+ size 26954367285
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "▁<PRE>",
4
+ "▁<MID>",
5
+ "▁<SUF>",
6
+ "▁<EOT>"
7
+ ],
8
+ "bos_token": {
9
+ "content": "<s>",
10
+ "lstrip": false,
11
+ "normalized": true,
12
+ "rstrip": false,
13
+ "single_word": false
14
+ },
15
+ "eos_token": {
16
+ "content": "</s>",
17
+ "lstrip": false,
18
+ "normalized": true,
19
+ "rstrip": false,
20
+ "single_word": false
21
+ },
22
+ "pad_token": "<|pad|>",
23
+ "unk_token": {
24
+ "content": "<unk>",
25
+ "lstrip": false,
26
+ "normalized": true,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:45ccb9c8b6b561889acea59191d66986d314e7cbd6a78abc6e49b139ca91c1e6
3
+ size 500058
tokenizer_config.json ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_bos_token": true,
3
+ "add_eos_token": false,
4
+ "additional_special_tokens": [
5
+ "▁<PRE>",
6
+ "▁<MID>",
7
+ "▁<SUF>",
8
+ "▁<EOT>"
9
+ ],
10
+ "bos_token": {
11
+ "__type": "AddedToken",
12
+ "content": "<s>",
13
+ "lstrip": false,
14
+ "normalized": true,
15
+ "rstrip": false,
16
+ "single_word": false
17
+ },
18
+ "clean_up_tokenization_spaces": false,
19
+ "eos_token": {
20
+ "__type": "AddedToken",
21
+ "content": "</s>",
22
+ "lstrip": false,
23
+ "normalized": true,
24
+ "rstrip": false,
25
+ "single_word": false
26
+ },
27
+ "eot_token": "▁<EOT>",
28
+ "fill_token": "<FILL_ME>",
29
+ "legacy": null,
30
+ "middle_token": "▁<MID>",
31
+ "model_max_length": 4096,
32
+ "pad_token": null,
33
+ "padding_side": "right",
34
+ "prefix_token": "▁<PRE>",
35
+ "sp_model_kwargs": {},
36
+ "suffix_first": false,
37
+ "suffix_token": "▁<SUF>",
38
+ "tokenizer_class": "CodeLlamaTokenizer",
39
+ "trust_remote_code": true,
40
+ "unk_token": {
41
+ "__type": "AddedToken",
42
+ "content": "<unk>",
43
+ "lstrip": false,
44
+ "normalized": true,
45
+ "rstrip": false,
46
+ "single_word": false
47
+ },
48
+ "use_fast": false
49
+ }