Commit be2e1b0 • 1 Parent(s): 13ff84d
yield responses

app.py CHANGED
@@ -48,6 +48,8 @@ def generate_instruction_response():
         len(extract_input) :
     ].split("\n")[0]
 
+    yield "## Generated instructions: \n" + sanitized_instruction
+
     response_template = f"""<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{sanitized_instruction}<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n"""
 
     response = pipeline(
@@ -59,10 +61,9 @@ def generate_instruction_response():
         top_p=1,
     )
 
-    user_message = sanitized_instruction
     assistant_response = response[0]["generated_text"][len(response_template) :]
 
-    return user_message, assistant_response
+    yield "## Generated response: \n" + assistant_response
 
 
 title = "Magpie demo"
@@ -77,10 +78,7 @@ You can learn more about the approach [in the paper](https://huggingface.co/pape
 iface = gr.Interface(
     fn=generate_instruction_response,
     inputs=[],
-    outputs=[
-        gr.Markdown(label="Generated User Instruction"),
-        gr.Markdown(label="Generated Model Response"),
-    ],
+    outputs=[gr.Markdown("Generated data")],
     title=title,
     description=description,
     submit_btn="Generate Instructions Response Pair",