davanstrien committed
Commit c52dd43
Parent: eaf638e

Create app.py

Files changed (1)
app.py +59 -0
app.py ADDED
@@ -0,0 +1,59 @@
+ import gradio as gr
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model = AutoModelForCausalLM.from_pretrained("instruction-pretrain/instruction-synthesizer")
+ tokenizer = AutoTokenizer.from_pretrained("instruction-pretrain/instruction-synthesizer")
+
+ def parse_pred(pred):
+     """Extract the list of instruction-response pairs from the prediction"""
+     QA_str_list = pred.split('</END>')
+     if not pred.endswith('</END>'):
+         # The generation was cut off mid-pair; drop the incomplete trailing chunk
+         QA_str_list = QA_str_list[:-1]
+
+     QA_list = []
+     raw_questions = []
+     for QA_str in QA_str_list:
+         try:
+             assert len(QA_str.split('<ANS>')) == 2, f'invalid QA string: {QA_str}'
+             Q_str, A_str = QA_str.split('<ANS>')
+             Q_str, A_str = Q_str.strip(), A_str.strip()
+             assert Q_str.startswith('<QUE>'), f'invalid question string: {Q_str} in QA_str: {QA_str}'
+             assert len(A_str) > 0, f'invalid answer string in QA_str: {QA_str}'
+             Q_str = Q_str.replace('<QUE>', '').strip()
+             assert Q_str.lower() not in raw_questions, f'duplicate question: {Q_str}'
+             QA_list.append({'Q': Q_str, 'A': A_str})
+             raw_questions.append(Q_str.lower())
+         except AssertionError:
+             # Skip malformed or duplicate pairs rather than failing the whole prediction
+             pass
+
+     return QA_list
+
+
+ def get_instruction_response_pairs(context):
+     '''Prompt the synthesizer to generate instruction-response pairs based on the given context'''
+     prompt = f'<s> <CON> {context} </CON>\n\n'
+     inputs = tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids.to(model.device)
+     outputs = model.generate(input_ids=inputs, max_new_tokens=400, do_sample=False)[0]
+
+     # Decode only the newly generated tokens, skipping the prompt
+     pred_start = int(inputs.shape[-1])
+     pred = tokenizer.decode(outputs[pred_start:], skip_special_tokens=True)
+     return parse_pred(pred)
+
+ def generate_pairs(context):
+     instruction_response_pairs = get_instruction_response_pairs(context)
+     output = ""
+     for index, pair in enumerate(instruction_response_pairs):
+         output += f"## Instruction {index + 1}:\n{pair['Q']}\n## Response {index + 1}:\n{pair['A']}\n\n"
+     return output
+
+ # Create Gradio interface
+ iface = gr.Interface(
+     fn=generate_pairs,
+     inputs=gr.Textbox(lines=5, label="Enter context here"),
+     outputs=gr.Textbox(lines=20, label="Generated Instruction-Response Pairs"),
+     title="Instruction-Response Pair Generator",
+     description="Enter a context, and the model will generate relevant instruction-response pairs."
+ )
+
+ # Launch the interface
+ iface.launch()
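
For context, parse_pred expects the synthesizer's raw generation to be a sequence of <QUE> ... <ANS> ... </END> blocks. Below is a minimal sketch of that parsing step, assuming the file above is saved as app.py (note that importing it runs the module-level from_pretrained calls); the sample string is invented purely to illustrate the tag layout.

from app import parse_pred  # loads the model weights at import time

# Invented sample generation; only the tag layout mirrors what parse_pred splits on.
sample_pred = (
    "<QUE> What does the context describe? "
    "<ANS> It describes an example passage. </END>"
    "<QUE> Why is the passage used here? "
    "<ANS> To illustrate the expected tag format. </END>"
)

print(parse_pred(sample_pred))
# [{'Q': 'What does the context describe?', 'A': 'It describes an example passage.'},
#  {'Q': 'Why is the passage used here?', 'A': 'To illustrate the expected tag format.'}]

Running python app.py launches the Gradio demo locally, assuming gradio, transformers, and torch are installed.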