Update app.py
app.py
CHANGED
@@ -6,15 +6,6 @@ import gradio as gr
 from pydantic import BaseModel, Field
 from typing import Optional, Literal
 
-
-import json
-import re
-import json
-import re
-from huggingface_hub import InferenceClient
-from pydantic import BaseModel, Field
-from typing import Optional, Literal
-
 class PromptInput(BaseModel):
     text: str = Field(..., description="The initial prompt text")
     meta_prompt_choice: Literal["star","done","physics","morphosis", "verse", "phor","bolism"] = Field(..., description="Choice of meta prompt strategy")
@@ -58,19 +49,14 @@ class PromptRefiner:
         )
         response_content = response.choices[0].message.content.strip()
         try:
-            # Extract JSON from between <json> tags
             json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
             if json_match:
                 json_str = json_match.group(1)
-                # Remove newlines and escape quotes within the JSON string
                 json_str = re.sub(r'\n\s*', ' ', json_str)
                 json_str = json_str.replace('"', '\\"')
-                # Wrap the entire string in quotes and parse it
                 json_output = json.loads(f'"{json_str}"')
-                # Ensure json_output is a dictionary
                 if isinstance(json_output, str):
                     json_output = json.loads(json_output)
-                # Unescape the parsed JSON
                 for key, value in json_output.items():
                     if isinstance(value, str):
                         json_output[key] = value.replace('\\"', '"')
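Note on the parsing path kept above: escaping every quote and wrapping the payload in quotes makes the first json.loads decode a JSON string literal, and the second pass then parses the object itself. A minimal sketch of that round trip, with a made-up model reply:

import json
import re

# Invented sample reply in the shape the meta prompt asks for: JSON inside <json> tags.
response_content = '''Here is my analysis.
<json>
{"initial_prompt_evaluation": "Too vague.",
 "refined_prompt": "Explain X step by step.",
 "explanation_of_refinements": "Added structure."}
</json>'''

json_match = re.search(r'<json>\s*(.*?)\s*</json>', response_content, re.DOTALL)
json_str = re.sub(r'\n\s*', ' ', json_match.group(1))  # collapse newlines to spaces
json_str = json_str.replace('"', '\\"')                 # escape every quote...
json_output = json.loads(f'"{json_str}"')               # ...so this yields the object text as a str
if isinstance(json_output, str):
    json_output = json.loads(json_output)               # second pass parses the actual dict
print(json_output["refined_prompt"])                    # Explain X step by step.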
@@ -80,7 +66,6 @@ class PromptRefiner:
         except (json.JSONDecodeError, ValueError) as e:
             print(f"Error parsing JSON: {e}")
             print(f"Raw content: {response_content}")
-            # If JSON parsing fails, attempt to extract the content manually
             output = {}
             for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
                 pattern = rf'"{key}":\s*"(.*?)"(?:,|\}})'
@@ -88,10 +73,10 @@ class PromptRefiner:
                 if match:
                     output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"')
                 else:
-                    output[key] = ""
+                    output[key] = ""
             return RefinementOutput(**output, raw_content=response_content)
 
-    def apply_prompt(self, prompt: str) -> str:
+    def apply_prompt(self, prompt: str, model: str) -> str:
         try:
             messages = [
                 {"role": "system", "content": "You are a helpful assistant. Answer in stylized version with latex format or markdown if relevant. Separate your answer into logical sections using level 2 headers (##) for sections and bolding (**) for subsections.Incorporate a variety of lists, headers, and text to make the answer visually appealing"},
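The regex fallback in this hunk is worth seeing in isolation: it scrapes each expected key out of output that json.loads rejects, and silently substitutes an empty string for anything it cannot find. A small sketch on an invented malformed reply:

import re

# Leading prose makes json.loads fail on this string, but per-key search still recovers values.
raw = 'Sure! {"refined_prompt": "Explain X step by step.", "explanation_of_refinements": "Added structure."}'
output = {}
for key in ["initial_prompt_evaluation", "refined_prompt", "explanation_of_refinements"]:
    match = re.search(rf'"{key}":\s*"(.*?)"(?:,|\}})', raw)
    output[key] = match.group(1).replace('\\n', '\n').replace('\\"', '"') if match else ""
print(output["refined_prompt"])                   # Explain X step by step.
print(repr(output["initial_prompt_evaluation"]))  # '' (key absent, silently empty)

Escaped quotes or nested braces inside a value would still defeat the non-greedy pattern, so this is a best-effort recovery, not a parser.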
@@ -99,14 +84,13 @@ class PromptRefiner:
             ]
 
             response = self.client.chat_completion(
-                model=
+                model=model,
                 messages=messages,
-                max_tokens=4000,
+                max_tokens=4000,
                 temperature=0.8
             )
 
             output = response.choices[0].message.content.strip()
-            # Basic post-processing
             output = output.replace('\n\n', '\n').strip()
             return output
         except Exception as e:
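The dangling model= in the old lines is a Python syntax error (model= immediately followed by messages=messages cannot parse), which by itself would explain the Space failing to start; threading the chosen model id through fixes it. For reference, a minimal standalone sketch of the fixed call using the huggingface_hub client the file already imports (the model id is just an example):

import os
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.getenv("HF_API_TOKEN"))
response = client.chat_completion(
    model="mistralai/Mistral-7B-Instruct-v0.1",  # example id; any chat-capable model works
    messages=[{"role": "user", "content": "Say hello."}],
    max_tokens=64,
    temperature=0.8,
)
print(response.choices[0].message.content)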
@@ -144,7 +128,22 @@ class GradioInterface:
                 outputs=[analysis_evaluation, refined_prompt, explanation_of_refinements, full_response_json]
             )
             with gr.Row():
-
+                apply_model = gr.Dropdown(
+                    [
+                        "meta-llama/Llama-2-70b-chat-hf",
+                        "tiiuae/falcon-180B",
+                        "bigscience/bloom",
+                        "EleutherAI/gpt-neox-20b",
+                        "google/flan-t5-xxl",
+                        "facebook/opt-66b",
+                        "Qwen/Qwen-72B",
+                        "mistralai/Mistral-7B-Instruct-v0.1",
+                        "microsoft/phi-2",
+                        "THUDM/chatglm3-6b"
+                    ],
+                    value="meta-llama/Llama-2-70b-chat-hf",
+                    label="Choose Model"
+                )
                 apply_button = gr.Button("Apply Prompts")
 
             with gr.Row():
@@ -157,7 +156,7 @@ class GradioInterface:
 
             apply_button.click(
                 fn=self.apply_prompts,
-                inputs=[prompt_text, refined_prompt],
+                inputs=[prompt_text, refined_prompt, apply_model],
                 outputs=[original_output, refined_output]
             )
 
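Gradio passes the live value of each component in inputs to the callback positionally, so adding apply_model as a third input has to line up with a third parameter on apply_prompts, which a later hunk adds. A self-contained sketch of the same wiring pattern (names here are illustrative, not from the Space):

import gradio as gr

def echo(prompt: str, model: str) -> str:
    # Receives the textbox value and the dropdown value, in inputs order.
    return f"[{model}] {prompt}"

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    model = gr.Dropdown(["model-a", "model-b"], value="model-a", label="Choose Model")
    out = gr.Textbox(label="Output")
    gr.Button("Run").click(fn=echo, inputs=[prompt, model], outputs=[out])

# demo.launch()  # uncomment to serve locally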
@@ -177,7 +176,6 @@ class GradioInterface:
                 inputs=[prompt_text, meta_prompt_choice]
             )
 
-
     def refine_prompt(self, prompt: str, meta_prompt_choice: str) -> tuple:
         input_data = PromptInput(text=prompt, meta_prompt_choice=meta_prompt_choice)
         result = self.prompt_refiner.refine_prompt(input_data)
@@ -189,27 +187,25 @@ class GradioInterface:
             result.dict()
         )
 
-    def apply_prompts(self,original_prompt: str, refined_prompt: str):
-        original_output = self.prompt_refiner.apply_prompt(original_prompt)
-        refined_output = self.prompt_refiner.apply_prompt(refined_prompt)
+    def apply_prompts(self, original_prompt: str, refined_prompt: str, model: str):
+        original_output = self.prompt_refiner.apply_prompt(original_prompt, model)
+        refined_output = self.prompt_refiner.apply_prompt(refined_prompt, model)
         return original_output, refined_output
 
-
     def launch(self, share=False):
-        self.interface.launch()
-
+        self.interface.launch(share=share)
+
 # Main code to run the application
 if __name__ == '__main__':
     api_token = os.getenv('HF_API_TOKEN')
     if not api_token:
         raise ValueError("HF_API_TOKEN not found in environment variables")
 
-
-    metadone=os.getenv('metadone')
+    metadone = os.getenv('metadone')
     echo_prompt_refiner = os.getenv('echo_prompt_refiner')
     metaprompt1 = os.getenv('metaprompt1')
     loic_metaprompt = os.getenv('loic_metaprompt')
-    openai_metaprompt=os.getenv('openai_metaprompt')
+    openai_metaprompt = os.getenv('openai_metaprompt')
     original_meta_prompt = os.getenv('original_meta_prompt')
     new_meta_prompt = os.getenv('new_meta_prompt')
     advanced_meta_prompt = os.getenv('advanced_meta_prompt')
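One caveat on the environment block: os.getenv returns None for any unset variable, and only HF_API_TOKEN is validated, so a missing meta-prompt secret would only surface later as a confusing failure inside the refiner. A hypothetical stricter loader (require_env is not part of the Space's code):

import os

def require_env(name: str) -> str:
    # Fail fast on missing secrets, mirroring the existing HF_API_TOKEN check.
    value = os.getenv(name)
    if not value:
        raise ValueError(f"{name} not found in environment variables")
    return value

# e.g. metadone = require_env('metadone')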