gokaygokay committed
Commit 754b512
1 Parent(s): 5c7ec8d
Files changed (1)
  1. app.py +42 -46
app.py CHANGED
@@ -23,12 +23,12 @@ def create_interface():
 
         with gr.Row():
             with gr.Column(scale=2):
-                with gr.Accordion("Basic Settings"):
+                with gr.Accordion("Settings"):
                     custom = gr.Textbox(label="Custom Input Prompt (optional)")
-
+
                 with gr.Accordion("Prompt Generation Options", open=False):
                     prompt_type = gr.Dropdown(
-                        choices=["Long", "Short", "Medium", "Long"],
+                        choices=["Long", "Short", "Medium", "OnlyObjects", "NoFigure", "Landscape", "Fantasy"],
                         label="Prompt Type",
                         value="Long",
                         interactive=True
@@ -45,14 +45,7 @@ def create_interface():
                 prompt_type.change(update_prompt_type, inputs=[prompt_type], outputs=[prompt_type])
 
             with gr.Column(scale=2):
-                generate_button = gr.Button("Generate Prompt")
-
-                with gr.Accordion("Generated Prompt", open=True):
-                    output = gr.Textbox(label="Generated Prompt", lines=4, show_copy_button=True)
-                    text_output = gr.Textbox(label="LLM Generated Text", lines=10, show_copy_button=True)
-
-            with gr.Column(scale=2):
-                with gr.Accordion("""LLM Prompt Generation""", open=False):
+                with gr.Accordion("LLM Prompt Generation", open=False):
                     long_talk = gr.Checkbox(label="Long Talk", value=True)
                     compress = gr.Checkbox(label="Compress", value=True)
                     compression_level = gr.Dropdown(
@@ -72,8 +65,9 @@ def create_interface():
                     api_key = gr.Textbox(label="API Key", type="password", visible=False)
                     model = gr.Dropdown(label="Model", choices=[], value="")
 
-                generate_text_button = gr.Button("Generate Prompt with LLM")
-                text_output = gr.Textbox(label="Generated Text", lines=10, show_copy_button=True)
+                # **Single Button for Generating Prompt and Text**
+                generate_button = gr.Button("Generate Random Prompt with LLM")
+                text_output = gr.Textbox(label="LLM Generated Text", lines=10, show_copy_button=True)
 
         # Initialize Models based on provider
         def update_model_choices(provider):
@@ -91,42 +85,44 @@ def create_interface():
         llm_provider.change(update_model_choices, inputs=[llm_provider], outputs=[model])
         llm_provider.change(update_api_key_visibility, inputs=[llm_provider], outputs=[api_key])
 
-        # Generate Prompt Function
-        def generate_prompt(prompt_type, custom_input):
-            dynamic_seed = random.randint(0, 1000000)
-            result = llm_node.generate_prompt(dynamic_seed, prompt_type, custom_input)
-            return result
-
-        generate_button.click(
-            generate_prompt,
-            inputs=[prompt_type, custom],
-            outputs=[output]
-        )
-
-        # Generate Text with LLM
-        def generate_text_with_llm(output_prompt, long_talk, compress, compression_level, custom_base_prompt, provider, api_key, model_selected):
-            global selected_prompt_type
-            poster = False  # Set a default value or modify as needed
-            result = llm_node.generate(
-                input_text=output_prompt,
-                long_talk=long_talk,
-                compress=compress,
-                compression_level=compression_level,
-                poster=poster,  # Added the missing 'poster' argument
-                prompt_type=selected_prompt_type,
-                custom_base_prompt=custom_base_prompt,
-                provider=provider,
-                api_key=api_key,
-                model=model_selected
-            )
-            selected_prompt_type = "Long"
-            return result
-
-        generate_text_button.click(
-            generate_text_with_llm,
-            inputs=[output, long_talk, compress, compression_level, custom_base_prompt, llm_provider, api_key, model],
+        # **Unified Function to Generate Prompt and Text**
+        def generate_random_prompt_with_llm(custom_input, prompt_type, long_talk, compress, compression_level, custom_base_prompt, provider, api_key, model_selected):
+            try:
+                # Step 1: Generate Prompt
+                dynamic_seed = random.randint(0, 1000000)
+                prompt = llm_node.generate_prompt(dynamic_seed, prompt_type, custom_input)
+                print(f"Generated Prompt: {prompt}")
+
+                # Step 2: Generate Text with LLM
+                poster = False  # Set a default value or modify as needed
+                result = llm_node.generate(
+                    input_text=prompt,
+                    long_talk=long_talk,
+                    compress=compress,
+                    compression_level=compression_level,
+                    poster=poster,  # Added the missing 'poster' argument
+                    prompt_type=selected_prompt_type,
+                    custom_base_prompt=custom_base_prompt,
+                    provider=provider,
+                    api_key=api_key,
+                    model=model_selected
+                )
+                print(f"Generated Text: {result}")
+
+                # Reset selected_prompt_type if necessary
+                selected_prompt_type = "Long"
+                return result
+
+            except Exception as e:
+                print(f"An error occurred: {e}")
+                return f"Error occurred while processing the request: {str(e)}"
+
+        # **Connect the Unified Function to the Single Button**
+        generate_button.click(
+            generate_random_prompt_with_llm,
+            inputs=[custom, prompt_type, long_talk, compress, compression_level, custom_base_prompt, llm_provider, api_key, model],
             outputs=[text_output],
-            api_name="generate_text"
+            api_name="generate_random_prompt_with_llm"
        )
 
     return demo
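
Because the commit registers the unified handler under api_name="generate_random_prompt_with_llm", the app can also be driven programmatically. Below is a minimal sketch using gradio_client; the URL and every argument value are placeholders for illustration, not values taken from this commit. The positional order follows the inputs=[custom, prompt_type, long_talk, compress, compression_level, custom_base_prompt, llm_provider, api_key, model] wiring above.

# pip install gradio_client
from gradio_client import Client

client = Client("http://127.0.0.1:7860/")  # placeholder URL for a local run

result = client.predict(
    "",        # custom: optional custom input prompt (empty for a random one)
    "Long",    # prompt_type: one of the dropdown choices
    True,      # long_talk
    True,      # compress
    "medium",  # compression_level (placeholder choice)
    "",        # custom_base_prompt
    "Groq",    # llm_provider (placeholder choice)
    "",        # api_key (only needed for providers that require one)
    "",        # model (placeholder; normally filled by update_model_choices)
    api_name="/generate_random_prompt_with_llm",
)
print(result)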
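
One caveat in the new handler is worth noting: the removed generate_text_with_llm declared global selected_prompt_type, but generate_random_prompt_with_llm does not, even though it both reads the name (prompt_type=selected_prompt_type) and assigns it (selected_prompt_type = "Long"). Because of that assignment, Python treats selected_prompt_type as local to the whole function, so the earlier read raises UnboundLocalError, which the try/except then surfaces as "Error occurred while processing the request: ...". A minimal self-contained sketch of the pitfall and the one-line fix (names abbreviated, not the full app code):

selected_prompt_type = "Long"  # module-level state, as in app.py


def broken():
    # The assignment below makes the name local to the whole function,
    # so this read fails with UnboundLocalError when called.
    value = selected_prompt_type
    selected_prompt_type = "Long"
    return value


def fixed():
    global selected_prompt_type   # the declaration the old handler had
    value = selected_prompt_type  # now reads the module-level binding
    selected_prompt_type = "Long"
    return value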