baconnier committed on
Commit
006bfbb
1 Parent(s): 2bfb23e

Update app.py

Files changed (1)
  1. app.py +23 -61
app.py CHANGED
@@ -178,77 +178,35 @@ class PromptRefiner:
                     "
                 }
 
-                Transform content while maintaining clear visual separation between elements. Each logical section should be clearly distinguished with appropriate spacing."""
-            },
-            {
-                "role": "user",
-                "content": prompt
-            }
-        ]
-        ''' messages = [
-            {
-                "role": "system",
-                "content": """You are a professional markdown formatting expert. Transform any content into well-structured documentation following these precise rules:
-
-                1. Document Structure:
-                   - Start with # for main title
-                   - Use ## for major sections
-                   - Use ### for subsections
-                   - Add > blockquote for key summaries or important notes
-                   - Separate major sections with ---
-
-                2. Content Types:
-                   Technical:
-                   - Use ```language for code blocks
-                   - Format inline code with `backticks`
-                   - Use $$ $$ for math equations
-                   - Create tables for comparisons or data
-                   - Use bullet points for features/characteristics
-
-                   Narrative:
-                   - Format dialogue with proper quotations
-                   - Use *italics* for emphasis
-                   - Keep paragraphs focused and concise
-
-                   Instructional:
-                   - Use numbered lists for steps
-                   - Bold key terms with **emphasis**
-                   - Add examples in code blocks
-                   - Use tables for parameter explanations
-
-                3. Visual Organization:
-                   - Maximum 3 levels of headers
-                   - Short paragraphs (3-5 sentences)
-                   - Consistent spacing between sections
-                   - Clear hierarchy in information
-                   - Strategic use of line breaks
-
-                4. Special Elements:
-                   - Tables for structured comparisons
-                   - Fenced code blocks with language specification
-                   - Blockquotes for summaries/key points
-                   - Lists only when necessary
-                   - LaTeX for mathematical notation
-
-                Transform the content while maintaining clarity, professionalism, and readability. Focus on creating a logical flow that enhances understanding."""
+                Transform content while maintaining clear visual separation between elements."""
             },
             {
                 "role": "user",
                 "content": prompt
             }
-        ]'''
-        response = self.client.chat_completion(
+        ]
+
+        # Use streaming for the response
+        response_stream = self.client.text_generation(
             model=model,
             messages=messages,
             max_tokens=3000,
-            temperature=0.8
+            temperature=0.8,
+            stream=True
         )
 
-        output = response.choices[0].message.content.strip()
-        return output.replace('\n\n', '\n').strip()
+        # Initialize an empty string to store the complete response
+        full_response = ""
+
+        # Process the stream
+        for response_chunk in response_stream:
+            if hasattr(response_chunk, 'token'):
+                chunk_text = response_chunk.token.text
+                full_response += chunk_text
+                yield full_response.replace('\n\n', '\n').strip()
+
         except Exception as e:
-            return f"Error: {str(e)}"
+            yield f"Error: {str(e)}"
 
 class GradioInterface:
     def __init__(self, prompt_refiner: PromptRefiner,custom_css):
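Note on this hunk: it deletes the commented-out `''' messages = [...] '''` block, replaces the blocking `chat_completion` call with a streamed call, and turns the method into a generator that re-yields the accumulated text after each chunk. As written, though, it passes `messages=` to `text_generation`, and in `huggingface_hub`'s `InferenceClient` that method takes a plain prompt string (the `.token.text` access matches its detailed stream output, not chat chunks). For a chat-style message list, the streaming equivalent is `chat_completion(..., stream=True)`. A minimal sketch of that pattern, with illustrative names (`apply_prompt_stream` and the module-level `client` are not from the repo):

```python
from huggingface_hub import InferenceClient

client = InferenceClient()

def apply_prompt_stream(messages, model):
    """Yield the accumulated response text after each streamed chunk."""
    full_response = ""
    try:
        for chunk in client.chat_completion(
            model=model,
            messages=messages,
            max_tokens=3000,
            temperature=0.8,
            stream=True,
        ):
            # delta.content can be None on role-only chunks, hence the fallback
            delta = chunk.choices[0].delta.content or ""
            full_response += delta
            yield full_response.replace('\n\n', '\n').strip()
    except Exception as e:
        yield f"Error: {str(e)}"
```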
@@ -352,7 +310,8 @@ class GradioInterface:
         apply_button.click(
             fn=self.apply_prompts,
             inputs=[prompt_text, refined_prompt, apply_model],
-            outputs=[original_output, refined_output]
+            outputs=[original_output, refined_output],
+            streaming=True
         )
 
     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
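Note on this hunk: Gradio streams to the UI whenever the event handler is a generator function; to my knowledge `click()` has no documented `streaming=True` argument (that flag belongs to input components such as `gr.Audio`), so the generator rewrite of `apply_prompts` in the next hunk is what actually drives the streaming. A self-contained illustration of the pattern, with illustrative component names:

```python
import time

import gradio as gr

def stream_text(prompt: str):
    """Generator handler: each yield redraws the output textbox in place."""
    out = ""
    for word in prompt.split():
        out += word + " "
        time.sleep(0.1)  # stand-in for tokens arriving from a model
        yield out

with gr.Blocks() as demo:
    box_in = gr.Textbox(label="Prompt")
    box_out = gr.Textbox(label="Streamed output")
    gr.Button("Run").click(fn=stream_text, inputs=box_in, outputs=box_out)

if __name__ == "__main__":
    demo.launch()
```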
@@ -371,7 +330,10 @@
     def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
         original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
         refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
-        return original_output, refined_output
+
+        # Create generators for both outputs
+        for original, refined in zip(original_output, refined_output):
+            yield original, refined
 
     def launch(self, share=False):
         self.interface.launch(share=share)
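Note on this hunk: `apply_prompts` now consumes the two generators returned by `apply_prompt` and yields `(original, refined)` pairs, so both output boxes update together. One caveat: `zip()` stops at the shorter stream, so the tail of the longer response would never reach the UI. A hedged alternative (not in the commit) that keeps showing the final text of whichever stream finishes first:

```python
from itertools import zip_longest

def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
    original_stream = self.prompt_refiner.apply_prompt(original_prompt, model)
    refined_stream = self.prompt_refiner.apply_prompt(refined_prompt, model)

    last_original, last_refined = "", ""
    for original, refined in zip_longest(original_stream, refined_stream):
        # zip_longest pads the exhausted stream with None; keep its last text
        last_original = original if original is not None else last_original
        last_refined = refined if refined is not None else last_refined
        yield last_original, last_refined
```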
 