---
license: apache-2.0
language:
- zh
pipeline_tag: text-generation
---

How to use
----------

Load the model with the `transformers` library and generate a reply through the Llama 3 chat template:

```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "BoyangZ/Llama3-chinese_chat_ft"

# Load the tokenizer and the model (bfloat16, placed automatically on available devices).
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are an LLM assistant. Users will ask you questions in Chinese; you will answer in Chinese."},
    {"role": "user", "content": "李白是哪个朝代的人?"},  # "Which dynasty was Li Bai from?"
]

# Build the Llama 3 chat prompt and move it to the model's device.
input_ids = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
).to(model.device)

# Stop on either the regular EOS token or Llama 3's end-of-turn token.
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

outputs = model.generate(
    input_ids,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)

# Decode only the newly generated tokens (everything after the prompt).
response = outputs[0][input_ids.shape[-1]:]
print(tokenizer.decode(response, skip_special_tokens=True))
```

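If you prefer the high-level `pipeline` API, the same call can be condensed as in the sketch below. This is not part of the original card: it assumes a recent `transformers` release whose `text-generation` pipeline accepts chat-style message lists and applies the chat template itself.

```python
from transformers import pipeline
import torch

# The same model, wrapped in a text-generation pipeline.
pipe = pipeline(
    "text-generation",
    model="BoyangZ/Llama3-chinese_chat_ft",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

messages = [
    {"role": "system", "content": "You are an LLM assistant. Users will ask you questions in Chinese; you will answer in Chinese."},
    {"role": "user", "content": "李白是哪个朝代的人?"},  # "Which dynasty was Li Bai from?"
]

# Stop on either the regular EOS token or Llama 3's end-of-turn token, as above.
terminators = [
    pipe.tokenizer.eos_token_id,
    pipe.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = pipe(
    messages,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=True,
    temperature=0.6,
    top_p=0.9,
)
# The pipeline returns the whole conversation; the last message is the model's reply.
print(outputs[0]["generated_text"][-1]["content"])
```

Sampling settings mirror the snippet above; adjust `temperature` and `top_p` to taste.
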
Example 1
---------

![image/png](https://cdn-uploads.huggingface.co/production/uploads/644a78de7c5c68c7762886eb/uvOKN0WPumRVwE_kPkFKj.png)

Example 2
---------

![image/png](https://cdn-uploads.huggingface.co/production/uploads/644a78de7c5c68c7762886eb/FoExkJHBp-yM6-XFwaDpG.png)

Example 3
---------

![image/png](https://cdn-uploads.huggingface.co/production/uploads/644a78de7c5c68c7762886eb/1EorUSsh-28LZFZpp768k.png)