Adding Evaluation Results

#4
Files changed (1)
  1. README.md +3 -75
README.md CHANGED
@@ -1,75 +1,3 @@
- ---
- license: openrail
- datasets:
- - shareAI/ShareGPT-Chinese-English-90k
- - shareAI/CodeChat
- language:
- - zh
- library_name: transformers
- tags:
- - code
- ---
-
- ## Chinese-adapted version of the CodeLlaMa model (supports multi-turn chat)
-
- Note: CodeLlaMa is purpose-built as a coding assistant; unlike ChineseLlaMa, it is suited to answering code-related questions.
- Inference code for multi-turn chat:
- (it can be copied and run directly; by default it pulls the model weights automatically)
-
- Companion GitHub repository: https://github.com/CrazyBoyM/CodeLLaMA-chat
-
- ```
- # from Firefly
- from transformers import AutoModelForCausalLM, AutoTokenizer
- import torch
-
-
- def main():
-     model_name = 'shareAI/CodeLLaMA-chat-13b-Chinese'
-
-     device = 'cuda'
-     max_new_tokens = 500    # maximum number of tokens generated per turn
-     history_max_len = 1000  # maximum number of history tokens the model sees
-     top_p = 0.9
-     temperature = 0.35
-     repetition_penalty = 1.0
-
-     # device_map='auto' already dispatches the model to the available GPU(s),
-     # so no extra .to(device) call is needed on the model itself
-     model = AutoModelForCausalLM.from_pretrained(
-         model_name,
-         trust_remote_code=True,
-         low_cpu_mem_usage=True,
-         torch_dtype=torch.float16,
-         device_map='auto'
-     ).eval()
-     tokenizer = AutoTokenizer.from_pretrained(
-         model_name,
-         trust_remote_code=True,
-         use_fast=False
-     )
-
-     # running token-id history of the whole conversation
-     history_token_ids = torch.tensor([[]], dtype=torch.long)
-
-     user_input = input('User:')
-     while True:
-         input_ids = tokenizer(user_input, return_tensors="pt", add_special_tokens=False).input_ids
-         eos_token_id = torch.tensor([[tokenizer.eos_token_id]], dtype=torch.long)
-         user_input_ids = torch.concat([input_ids, eos_token_id], dim=1)
-         history_token_ids = torch.concat((history_token_ids, user_input_ids), dim=1)
-         # truncate to the most recent history_max_len tokens
-         model_input_ids = history_token_ids[:, -history_max_len:].to(device)
-         with torch.no_grad():
-             outputs = model.generate(
-                 input_ids=model_input_ids, max_new_tokens=max_new_tokens, do_sample=True, top_p=top_p,
-                 temperature=temperature, repetition_penalty=repetition_penalty, eos_token_id=tokenizer.eos_token_id
-             )
-         # keep only the newly generated tokens, then append them to the history
-         model_input_ids_len = model_input_ids.size(1)
-         response_ids = outputs[:, model_input_ids_len:]
-         history_token_ids = torch.concat((history_token_ids, response_ids.cpu()), dim=1)
-         response = tokenizer.batch_decode(response_ids)
-         print("Bot:" + response[0].strip().replace(tokenizer.eos_token, ""))
-         user_input = input('User:')
-
-
- if __name__ == '__main__':
-     main()
- ```
 
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38eb9f6643b200d6d1344a9acf9073b81cfe12b4c039d1379ff95431642b8463
+ size 3149
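
For reference, the three `+` lines above are a Git LFS pointer file: `oid` is the SHA-256 of the actual file contents and `size` is its length in bytes. A minimal Python sketch of how such a pointer can be checked against a locally downloaded file (the `README.md` path and the `matches_pointer` helper are illustrative assumptions, not part of this repository):

```
# Illustrative sketch only: verify a downloaded file against a Git LFS pointer.
# 'README.md' and matches_pointer are assumed names, not from this repo.
import hashlib
import os


def matches_pointer(path, oid_hex, size):
    # An LFS pointer's 'oid sha256:<hex>' is the SHA-256 of the full file
    # contents; 'size' is the file's length in bytes.
    with open(path, 'rb') as f:
        digest = hashlib.sha256(f.read()).hexdigest()
    return digest == oid_hex and os.path.getsize(path) == size


print(matches_pointer(
    'README.md',
    '38eb9f6643b200d6d1344a9acf9073b81cfe12b4c039d1379ff95431642b8463',
    3149,
))
```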