loubnabnl HF staff committed on
Commit
d26da8b
1 Parent(s): f06f5f3

Create test_prompts.py

Browse files
Files changed (1) hide show
  1. test_prompts.py +106 -0
test_prompts.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from transformers import AutoModelForCausalLM, AutoTokenizer
2
+
3
+ BASE_PATH = "/fsx/loubna/projects/alignment-handbook/recipes/cosmo2/sft/data"
4
+ TEMPERATURE = 0.2
5
+ TOP_P = 0.9
6
+
7
+ CHECKPOINT = "loubnabnl/smollm-350M-instruct-add-basics"
8
+
9
+ print(f"💾 Loading the model and tokenizer: {CHECKPOINT}...")
10
+ device = "cuda"
11
+ tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)
12
+ model_s = AutoModelForCausalLM.from_pretrained(CHECKPOINT).to(device)
13
+
14
+ print("🧪 Testing single-turn conversations...")
15
+ L = [
16
+ "Hi",
17
+ "Hello",
18
+ "Tell me a joke",
19
+ "Who are you?",
20
+ "What's your name?",
21
+ "How do I make pancakes?",
22
+ "Can you tell me what is gravity?",
23
+ "What is the capital of Morocco?",
24
+ "What's 2+2?",
25
+ "Hi, what is 2+1?",
26
+ "What's 3+5?",
27
+ "Write a poem about Helium",
28
+ "Hi, what are some popular dishes from Japan?",
29
+ ]
30
+
31
+
32
+ for i in range(len(L)):
33
+ print(f"🔮 {L[i]}")
34
+ messages = [{"role": "user", "content": L[i]}]
35
+ input_text = tokenizer.apply_chat_template(messages, tokenize=False)
36
+ inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
37
+ outputs = model_s.generate(
38
+ inputs, max_new_tokens=200, top_p=TOP_P, do_sample=True, temperature=TEMPERATURE
39
+ )
40
+ with open(
41
+ f"{BASE_PATH}/{CHECKPOINT.split('/')[-1]}_temp_{TEMPERATURE}_topp{TOP_P}.txt",
42
+ "a",
43
+ ) as f:
44
+ f.write("=" * 50 + "\n")
45
+ f.write(tokenizer.decode(outputs[0]))
46
+ f.write("\n")
47
+
48
+
49
+ print("🧪 Now testing multi-turn conversations...")
50
+ # Multi-turn conversations
51
+ messages_1 = [
52
+ {"role": "user", "content": "Hi"},
53
+ {"role": "assistant", "content": "Hello! How can I help you today?"},
54
+ {"role": "user", "content": "What's 2+2?"},
55
+ ]
56
+ messages_2 = [
57
+ {"role": "user", "content": "Hi"},
58
+ {"role": "assistant", "content": "Hello! How can I help you today?"},
59
+ {"role": "user", "content": "What's 2+2?"},
60
+ {"role": "assistant", "content": "4"},
61
+ {"role": "user", "content": "Why?"},
62
+ ]
63
+ messages_3 = [
64
+ {"role": "user", "content": "Who are you?"},
65
+ {"role": "assistant", "content": "I am an AI assistant. How can I help you today?"},
66
+ {"role": "user", "content": "What's your name?"},
67
+ ]
68
+ messages_4 = [
69
+ {"role": "user", "content": "Tell me a joke"},
70
+ {"role": "assistant", "content": "Sure! Why did the tomato turn red?"},
71
+ {"role": "user", "content": "Why?"},
72
+ ]
73
+ messages_5 = [
74
+ {"role": "user", "content": "Can you tell me what is gravity?"},
75
+ {
76
+ "role": "assistant",
77
+ "content": "Sure! Gravity is a force that attracts objects toward each other. It is what keeps us on the ground and what makes things fall.",
78
+ },
79
+ {"role": "user", "content": "Who discovered it?"},
80
+ ]
81
+ messages_6 = [
82
+ {"role": "user", "content": "How do I make pancakes?"},
83
+ {
84
+ "role": "assistant",
85
+ "content": "Sure! Here is a simple recipe for pancakes: Ingredients: 1 cup flour, 1 cup milk, 1 egg, 1 tbsp sugar, 1 tsp baking powder, 1/2 tsp salt. Instructions: 1. Mix all the dry ingredients together in a bowl. 2. Add the milk and egg and mix until smooth. 3. Heat a non-stick pan over medium heat. 4. Pour 1/4 cup of batter onto the pan. 5. Cook until bubbles form on the surface, then flip and cook for another minute. 6. Serve with your favorite toppings.",
86
+ },
87
+ {"role": "user", "content": "What are some popular toppings?"},
88
+ ]
89
+
90
+ L = [messages_1, messages_2, messages_3, messages_4, messages_5, messages_6]
91
+
92
+ for i in range(len(L)):
93
+ input_text = tokenizer.apply_chat_template(L[i], tokenize=False)
94
+ inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
95
+ outputs = model_s.generate(
96
+ inputs, max_new_tokens=200, top_p=TOP_P, do_sample=True, temperature=TEMPERATURE
97
+ )
98
+ with open(
99
+ f"{BASE_PATH}/{CHECKPOINT.split('/')[-1]}_temp_{TEMPERATURE}_topp{TOP_P}_MT.txt",
100
+ "a",
101
+ ) as f:
102
+ f.write("=" * 50 + "\n")
103
+ f.write(tokenizer.decode(outputs[0]))
104
+ f.write("\n")
105
+
106
+ print("🔥 Done!")