Commit 9efbb95
Parent(s): c474bbd

improve output formatting

app.py CHANGED
@@ -35,8 +35,12 @@ terminators = [
 
 @spaces.GPU
 def generate_instruction_response():
-    prompt_info = f"""Generating
-
+    prompt_info = f"""### Generating user prompt using the template:
+
+```
+{extract_input}
+```
+"""
     yield prompt_info
     instruction = pipeline(
         extract_input,
@@ -51,9 +55,10 @@ def generate_instruction_response():
         len(extract_input) :
     ].split("\n")[0]
 
-    first_step =
-
-
+    first_step = (
+        prompt_info + f"### LLM generated instruction:\n\n{sanitized_instruction}"
+    )
+    yield first_step + "\n\n### Generating LLM response..."
 
     response_template = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"""
 
@@ -68,14 +73,34 @@ def generate_instruction_response():
 
     assistant_response = response[0]["generated_text"][len(response_template) :]
 
-
+    final_output = f"""### Template used for generating instruction:
+
+```
+{extract_input}
+```
+
+### LLM Generated Instruction:
+
+{sanitized_instruction}
+
+### Template used for generating response:
+
+```
+{response_template}
+```
+
+### LLM Generated Response:
+
+{assistant_response}
+"""
+    yield final_output
 
 
-title = "Magpie
+title = "Magpie Demo"
 description = """
-This Gradio demo showcases the approach described in the Magpie paper. Magpie is a data synthesis pipeline that creates high-quality alignment data without relying on prompt engineering or seed questions. Instead, it generates instruction data by prompting aligned LLMs with a pre-query template.
+This Gradio demo showcases the approach described in the Magpie paper. Magpie is a data synthesis pipeline that creates high-quality alignment data without relying on prompt engineering or seed questions. Instead, it generates instruction data by prompting aligned LLMs with a pre-query template.
 
-In this demo, you can see how the model generates a user instruction and a model response.
+In this demo, you can see how the model generates a user instruction and a model response, along with the templates used in the process.
 
 You can learn more about the approach [in the paper](https://huggingface.co/papers/2406.08464).
 """
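The description in the hunk above refers to Magpie's core idea: an aligned chat model given only a pre-query template (the chat header opened for the user turn, with no content) completes it with a plausible user instruction, which can then be answered in a second pass. A minimal sketch of that two-step flow follows; `extract_input` and `pipeline` are defined outside the hunks shown, so the model id, template string, and sampling settings below are illustrative assumptions, not taken from this commit.

```python
# Hedged sketch of the Magpie-style two-step generation the Space performs.
# Model id, sampling parameters, and the exact pre-query template are assumptions.
import transformers

model_id = "meta-llama/Meta-Llama-3-8B-Instruct"  # assumed checkpoint
pipeline = transformers.pipeline("text-generation", model=model_id, device_map="auto")

# Pre-query template: the Llama 3 chat header is opened for the user turn and
# left empty, so the aligned model fills it with a plausible user instruction.
extract_input = "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"

instruction = pipeline(extract_input, max_new_tokens=128, do_sample=True, temperature=1.0)
sanitized_instruction = instruction[0]["generated_text"][
    len(extract_input) :
].split("\n")[0]

# Second pass: wrap the generated instruction in the full chat template and
# let the model answer it.
response_template = (
    f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
    f"{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"
)
response = pipeline(response_template, max_new_tokens=512, do_sample=True, temperature=1.0)
assistant_response = response[0]["generated_text"][len(response_template) :]

print(sanitized_instruction)
print(assistant_response)
```

The slicing by `len(extract_input) :` and `len(response_template) :` mirrors the new lines 55–56 and 74 of app.py; the Space additionally yields intermediate Markdown at each stage instead of printing.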
@@ -83,7 +108,7 @@ You can learn more about the approach [in the paper](https://huggingface.co/pape
 iface = gr.Interface(
     fn=generate_instruction_response,
     inputs=[],
-    outputs=[gr.Markdown("Generated
+    outputs=[gr.Markdown(label="Generated Data")],
     title=title,
     description=description,
     submit_btn="Generate Instructions Response Pair",
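The output-formatting change leans on Gradio's handling of generator functions: each `yield` from `generate_instruction_response` replaces the contents of the single Markdown output, so the user first sees the pre-query template, then the generated instruction, and finally the fully formatted result. Below is a stripped-down sketch of that streaming pattern, with placeholder text and `time.sleep` standing in for the two pipeline calls; none of the placeholder strings come from this commit.

```python
# Hedged sketch of the generator-to-Markdown streaming pattern used by the Space.
import time

import gradio as gr


def fake_generate():
    # First yield: show the template that will be used.
    text = "### Generating user prompt using the template:\n\n<pre-query template here>\n"
    yield text
    time.sleep(1)  # stands in for the first pipeline() call
    # Second yield: append the generated instruction and a progress note.
    text += "\n### LLM generated instruction:\n\nHow do I reverse a list in Python?"
    yield text + "\n\n### Generating LLM response..."
    time.sleep(1)  # stands in for the second pipeline() call
    # Final yield: the complete, formatted output.
    yield text + "\n\n### LLM Generated Response:\n\nUse my_list[::-1] or reversed(my_list)."


iface = gr.Interface(
    fn=fake_generate,
    inputs=[],
    outputs=[gr.Markdown(label="Generated Data")],
    title="Streaming Markdown sketch",
)

if __name__ == "__main__":
    iface.launch()
```

Because each yield re-renders the whole Markdown component, the function rebuilds the full string at every step rather than emitting deltas, which is exactly what the new `final_output` block in the commit does.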