gokaygokay committed
Commit 754b512 • 1 Parent(s): 5c7ec8d
trial

app.py CHANGED
@@ -23,12 +23,12 @@ def create_interface():
 
         with gr.Row():
             with gr.Column(scale=2):
-                with gr.Accordion("
+                with gr.Accordion("Settings"):
                     custom = gr.Textbox(label="Custom Input Prompt (optional)")
-
+
                 with gr.Accordion("Prompt Generation Options", open=False):
                     prompt_type = gr.Dropdown(
-                        choices=["Long", "Short", "Medium", "
+                        choices=["Long", "Short", "Medium", "OnlyObjects", "NoFigure", "Landscape", "Fantasy"],
                         label="Prompt Type",
                         value="Long",
                         interactive=True
@@ -45,14 +45,7 @@ def create_interface():
                 prompt_type.change(update_prompt_type, inputs=[prompt_type], outputs=[prompt_type])
 
             with gr.Column(scale=2):
-
-
-                with gr.Accordion("Generated Prompt", open=True):
-                    output = gr.Textbox(label="Generated Prompt", lines=4, show_copy_button=True)
-                    text_output = gr.Textbox(label="LLM Generated Text", lines=10, show_copy_button=True)
-
-            with gr.Column(scale=2):
-                with gr.Accordion("""LLM Prompt Generation""", open=False):
+                with gr.Accordion("LLM Prompt Generation", open=False):
                     long_talk = gr.Checkbox(label="Long Talk", value=True)
                     compress = gr.Checkbox(label="Compress", value=True)
                     compression_level = gr.Dropdown(
@@ -72,8 +65,9 @@ def create_interface():
                     api_key = gr.Textbox(label="API Key", type="password", visible=False)
                     model = gr.Dropdown(label="Model", choices=[], value="")
 
-
-
+                    # **Single Button for Generating Prompt and Text**
+                    generate_button = gr.Button("Generate Random Prompt with LLM")
+                    text_output = gr.Textbox(label="LLM Generated Text", lines=10, show_copy_button=True)
 
         # Initialize Models based on provider
         def update_model_choices(provider):
@@ -91,42 +85,44 @@ def create_interface():
         llm_provider.change(update_model_choices, inputs=[llm_provider], outputs=[model])
         llm_provider.change(update_api_key_visibility, inputs=[llm_provider], outputs=[api_key])
 
-        # Generate Prompt
-        def
-
-
-
+        # **Unified Function to Generate Prompt and Text**
+        def generate_random_prompt_with_llm(custom_input, prompt_type, long_talk, compress, compression_level, custom_base_prompt, provider, api_key, model_selected):
+            try:
+                # Step 1: Generate Prompt
+                dynamic_seed = random.randint(0, 1000000)
+                prompt = llm_node.generate_prompt(dynamic_seed, prompt_type, custom_input)
+                print(f"Generated Prompt: {prompt}")
+
+                # Step 2: Generate Text with LLM
+                poster = False  # Set a default value or modify as needed
+                result = llm_node.generate(
+                    input_text=prompt,
+                    long_talk=long_talk,
+                    compress=compress,
+                    compression_level=compression_level,
+                    poster=poster,  # Added the missing 'poster' argument
+                    prompt_type=selected_prompt_type,
+                    custom_base_prompt=custom_base_prompt,
+                    provider=provider,
+                    api_key=api_key,
+                    model=model_selected
+                )
+                print(f"Generated Text: {result}")
 
-
-
-
-            outputs=[output]
-        )
+                # Reset selected_prompt_type if necessary
+                selected_prompt_type = "Long"
+                return result
 
-
-
-
-
-
-
-
-
-                compression_level=compression_level,
-                poster=poster,  # Added the missing 'poster' argument
-                prompt_type=selected_prompt_type,
-                custom_base_prompt=custom_base_prompt,
-                provider=provider,
-                api_key=api_key,
-                model=model_selected
-            )
-            selected_prompt_type = "Long"
-            return result
-
-        generate_text_button.click(
-            generate_text_with_llm,
-            inputs=[output, long_talk, compress, compression_level, custom_base_prompt, llm_provider, api_key, model],
+            except Exception as e:
+                print(f"An error occurred: {e}")
+                return f"Error occurred while processing the request: {str(e)}"
+
+        # **Connect the Unified Function to the Single Button**
+        generate_button.click(
+            generate_random_prompt_with_llm,
+            inputs=[custom, prompt_type, long_talk, compress, compression_level, custom_base_prompt, llm_provider, api_key, model],
             outputs=[text_output],
-            api_name="
+            api_name="generate_random_prompt_with_llm"
         )
 
     return demo
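The net effect of the commit: the separate "Generated Prompt" textbox and the two-step flow are removed, and a single button now generates a random prompt and immediately expands it with the LLM, returning only the final text. Below is a minimal runnable sketch of that single-button pattern, assuming a recent Gradio version. LLMNodeStub is a hypothetical stand-in for the repo's llm_node, whose implementation is not shown in this diff; only the gr.* calls and the names taken from the diff are from the source. One deviation is labeled in the code: as committed, `prompt_type=selected_prompt_type` reads a name that is first assigned later in the same function, which would raise UnboundLocalError at runtime unless selected_prompt_type is declared global elsewhere in app.py, so the sketch passes the dropdown value directly.

import random
import gradio as gr

class LLMNodeStub:
    """Hypothetical stand-in for the repo's llm_node (not shown in this diff)."""

    def generate_prompt(self, seed, prompt_type, custom_input):
        # Stub: the real node builds a random prompt from the seed and type.
        return f"[{prompt_type} prompt, seed={seed}] {custom_input}".strip()

    def generate(self, input_text, prompt_type, **kwargs):
        # Stub: the real node forwards the prompt to the selected LLM provider.
        return f"LLM output for: {input_text}"

llm_node = LLMNodeStub()

def create_interface():
    with gr.Blocks() as demo:
        custom = gr.Textbox(label="Custom Input Prompt (optional)")
        prompt_type = gr.Dropdown(
            choices=["Long", "Short", "Medium", "OnlyObjects", "NoFigure", "Landscape", "Fantasy"],
            label="Prompt Type",
            value="Long",
        )
        generate_button = gr.Button("Generate Random Prompt with LLM")
        text_output = gr.Textbox(label="LLM Generated Text", lines=10, show_copy_button=True)

        def generate_random_prompt_with_llm(custom_input, prompt_type):
            try:
                # Step 1: generate a random prompt, as in the commit.
                dynamic_seed = random.randint(0, 1000000)
                prompt = llm_node.generate_prompt(dynamic_seed, prompt_type, custom_input)
                # Step 2: expand it with the LLM. The dropdown value is passed
                # directly, avoiding the unassigned selected_prompt_type.
                return llm_node.generate(input_text=prompt, prompt_type=prompt_type)
            except Exception as e:
                return f"Error occurred while processing the request: {e}"

        # One button drives both steps and exposes a single API endpoint.
        generate_button.click(
            generate_random_prompt_with_llm,
            inputs=[custom, prompt_type],
            outputs=[text_output],
            api_name="generate_random_prompt_with_llm",
        )
    return demo

if __name__ == "__main__":
    create_interface().launch()

Wiring one click handler, rather than chaining a prompt-generation event into a text-generation event, keeps the intermediate prompt server-side and gives API callers a single api_name endpoint.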