baconnier committed
Commit: 0711573
Parent: 9fc3b13

Update app.py

Files changed (1):
  1. app.py +3 -19
app.py CHANGED
@@ -86,14 +86,12 @@ class PromptRefiner:
             {"role": "system", "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections.Incorporate a variety of lists, headers, and text to make the answer visually appealing"},
             {"role": "user", "content": prompt}
         ]
-
         response = self.client.chat_completion(
             model=model,
             messages=messages,
             max_tokens=2000,
             temperature=0.8
         )
-
         output = response.choices[0].message.content.strip()
         output = output.replace('\n\n', '\n').strip()
         return output
@@ -103,8 +101,6 @@ class PromptRefiner:
 class GradioInterface:
     def __init__(self, prompt_refiner: PromptRefiner):
         self.prompt_refiner = prompt_refiner
-
-        # Define custom CSS for containers
         custom_css = """
         .container {
             border: 2px solid #2196F3;
@@ -126,7 +122,6 @@ class GradioInterface:
             font-size: 1.2em;
         }
 
-        /* Remove default Gradio styles */
         .no-background > div:first-child {
             border: none !important;
             background: transparent !important;
@@ -140,7 +135,6 @@ class GradioInterface:
         .results-container::before { content: 'RESULTS'; }
         .examples-container::before { content: 'EXAMPLES'; }
 
-        /* Custom styling for radio buttons */
         .radio-group {
             display: flex;
             gap: 10px;
@@ -149,13 +143,11 @@
         """
 
         with gr.Blocks(css=custom_css, theme=gr.themes.Default()) as self.interface:
-            # Title Container
             with gr.Column(elem_classes=["container", "title-container"]):
                 gr.Markdown("# PROMPT++")
                 gr.Markdown("### Automating Prompt Engineering by Refining your Prompts")
-                gr.Markdown("Learn how to generate an improved version of your prompts. Enter a main idea for a prompt, choose a meta prompt, and the model will attempt to generate an improved version.")
+                gr.Markdown("Learn how to generate an improved version of your prompts.")
 
-            # Input Container
             with gr.Column(elem_classes=["container", "input-container"]):
                 prompt_text = gr.Textbox(
                     label="Type the prompt (or let it empty to see metaprompt)",
@@ -171,7 +163,6 @@
                 )
                 refine_button = gr.Button("Refine Prompt")
 
-            # Analysis Container
             with gr.Column(elem_classes=["container", "analysis-container"]):
                 gr.Markdown("### Initial prompt analysis")
                 analysis_evaluation = gr.Markdown()
@@ -186,7 +177,6 @@
                 with gr.Accordion("Full Response JSON", open=False, visible=False):
                     full_response_json = gr.JSON()
 
-            # Model Application Container
             with gr.Column(elem_classes=["container", "model-container"]):
                 gr.Markdown("## See MetaPrompt Impact")
                 with gr.Row():
@@ -201,12 +191,11 @@
                         "microsoft/Phi-3.5-mini-instruct"
                     ],
                     value="meta-llama/Meta-Llama-3-70B-Instruct",
-                    label="Choose the Model to apply to the prompts (the one you will used)",
+                    label="Choose the Model",
                     elem_classes="no-background"
                 )
                 apply_button = gr.Button("Apply MetaPrompt")
 
-            # Results Container
             with gr.Column(elem_classes=["container", "results-container"]):
                 with gr.Tabs():
                     with gr.TabItem("Original Prompt Output"):
@@ -214,7 +203,6 @@
                 with gr.TabItem("Refined Prompt Output"):
                     refined_output = gr.Markdown()
 
-            # Examples Container
             with gr.Column(elem_classes=["container", "examples-container"]):
                 with gr.Accordion("Examples", open=True):
                     gr.Examples(
@@ -228,12 +216,11 @@
                         ["Is nuclear energy good?", "verse"],
                         ["How does a computer work?", "phor"],
                         ["How to make money fast?", "done"],
-                        ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
+                        ["how can you prove IT0's lemma in stochastic calculus ?", "arpe"],
                     ],
                     inputs=[prompt_text, meta_prompt_choice]
                 )
 
-            # Connect the buttons to their functions
             refine_button.click(
                 fn=self.refine_prompt,
                 inputs=[prompt_text, meta_prompt_choice],
@@ -246,7 +233,6 @@
                 outputs=[original_output, refined_output]
             )
 
-    # Your existing methods remain the same
     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
         input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
         result = self.prompt_refiner.refine_prompt(input_data)
@@ -278,7 +264,6 @@ metaprompt_explanations = {
 
 explanation_markdown = "".join([f"- **{key}**: {value}\n" for key, value in metaprompt_explanations.items()])
 
-# Main code to run the application
 if __name__ == '__main__':
     meta_info=""
     api_token = os.getenv('HF_API_TOKEN')
@@ -297,7 +282,6 @@ if __name__ == '__main__':
     math_meta_prompt = os.getenv('metamath')
     autoregressive_metaprompt = os.getenv('autoregressive_metaprompt')
 
-
     prompt_refiner = PromptRefiner(api_token)
     gradio_interface = GradioInterface(prompt_refiner)
     gradio_interface.launch(share=True)
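
For readers who want to reproduce the inference call from the first hunk on its own, here is a minimal sketch of the chat-completion pattern it relies on, assuming huggingface_hub's InferenceClient. The client setup and example messages are illustrative; only the model name, max_tokens=2000, and temperature=0.8 come from the diff.

import os

from huggingface_hub import InferenceClient

# Read the same environment variable the app uses for authentication.
client = InferenceClient(token=os.getenv("HF_API_TOKEN"))

# Illustrative messages; the app builds these from its system prompt and user input.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain prompt refinement in one paragraph."},
]

# Same call shape and sampling parameters as the diff's chat_completion call.
response = client.chat_completion(
    messages=messages,
    model="meta-llama/Meta-Llama-3-70B-Instruct",
    max_tokens=2000,
    temperature=0.8,
)

print(response.choices[0].message.content.strip())

The response object follows the OpenAI-style shape, which is why the code above (and the diff) reads response.choices[0].message.content.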
 
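The layout hunks all build on one Gradio pattern: nest components inside gr.Blocks and gr.Column, then wire buttons to handlers with .click. Below is a stripped-down sketch of that wiring, with a hypothetical echo handler standing in for the app's PromptRefiner-backed methods.

import gradio as gr

def echo(prompt: str, choice: str) -> str:
    # Hypothetical stand-in for the app's refine_prompt / apply_prompts handlers.
    return f"[{choice}] {prompt}"

with gr.Blocks() as demo:
    with gr.Column():
        prompt_text = gr.Textbox(label="Prompt")
        meta_prompt_choice = gr.Radio(["done", "phor", "verse"], label="Meta prompt")
        refine_button = gr.Button("Refine Prompt")
        refined_output = gr.Markdown()

    # .click routes the listed inputs through fn and renders the return value
    # into the output component, mirroring the diff's refine_button.click.
    refine_button.click(fn=echo, inputs=[prompt_text, meta_prompt_choice], outputs=refined_output)

if __name__ == "__main__":
    demo.launch(share=True)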