Spaces:
Runtime error
VikasQblocks
committed on
Commit • 58754bb
1 Parent(s): ddd4e1d
Add async execution and add temperature to input
Browse files:
- __pycache__/MonsterAPIClient.cpython-310.pyc +0 -0
- gradio_app.py +50 -17
__pycache__/MonsterAPIClient.cpython-310.pyc
ADDED
Binary file (6.96 kB)
gradio_app.py
CHANGED
@@ -1,29 +1,58 @@
 import gradio as gr
 import requests
 from tqdm import tqdm
+from concurrent.futures import ThreadPoolExecutor, as_completed
 from MonsterAPIClient import MClient
 from MonsterAPIClient import MODELS_TO_DATAMODEL
 client = MClient()
 
-
-
 # Available models list
-EXCLUSION_LIST = ['mpt-30B-instruct']
+EXCLUSION_LIST = ['mpt-30B-instruct', 'llama2-7b-chat', 'openllama-13b-base']
 available_models = list(set(list(MODELS_TO_DATAMODEL.keys())) - set(EXCLUSION_LIST))
 
-def generate_model_output(model, input_text):
+def generate_model_output(model: str, input_text: str, temp: float = 0.98) -> str:
+    """
+    Generate output from a specific model.
+
+    Parameters:
+        model (str): The name of the model.
+        input_text (str): The input prompt for the model.
+        temp (float, optional): The temperature value for text generation. Defaults to 0.98.
+
+    Returns:
+        str: The generated output text.
+    """
     try:
-        response = client.get_response(model, {
+        response = client.get_response(model, {
+            "prompt": input_text,
+            "temp": temp,
+        })
         output = client.wait_and_get_result(response['process_id'])
-        return output
+        return model, output
     except Exception as e:
-        return f"Error occurred: {str(e)}"
+        return model, f"Error occurred: {str(e)}"
 
-
-
+def generate_output(selected_models: list, input_text: str, temp: float = 0.98,
+                    available_models: list = available_models) -> list:
+    """
+    Generate outputs from selected models using Monster API.
+
+    Parameters:
+        selected_models (list): List of selected model names.
+        input_text (str): The input prompt for the models.
+        temp (float, optional): The temperature value for text generation. Defaults to 0.98.
+        available_models (list, optional): List of available model names. Defaults to global variable.
+
+    Returns:
+        list: List of generated output texts corresponding to each model.
+    """
     outputs = {}
-
-
+    with ThreadPoolExecutor() as executor:
+        future_to_model = {executor.submit(generate_model_output, model, input_text, temp): model for model in selected_models}
+        for future in tqdm(as_completed(future_to_model), total=len(selected_models)):
+            model, output = future.result()
+            outputs[model] = output
+
     ret_outputs = []
     for model in available_models:
         if model not in outputs:
@@ -32,16 +61,20 @@ def generate_output(selected_models, input_text, available_models=available_models
             ret_outputs.append(outputs[model].replace("\n", "<br>"))
 
     return ret_outputs
-
+
 output_components = [gr.outputs.Textbox(label=model) for model in available_models]
 
-checkboxes = gr.inputs.CheckboxGroup(available_models
-textbox = gr.inputs.Textbox()
+checkboxes = gr.inputs.CheckboxGroup(available_models, label="Select models to generate outputs:")
+textbox = gr.inputs.Textbox(label="Input Prompt")
+temp = gr.inputs.Slider(minimum=0.0, maximum=1.0, default=0.98, label="Temperature", step=0.01)
 
-# Gradio Interface
 input_text = gr.Interface(
     fn=generate_output,
-    inputs=[
+    inputs=[
+        checkboxes,
+        textbox,
+        temp
+    ],
     outputs=output_components,
     live=False,
     capture_session=True,
@@ -51,4 +84,4 @@ input_text = gr.Interface(
 )
 
 # Launch the Gradio app
-input_text.launch()
+input_text.launch()
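
Note on the async execution this commit adds: it is the standard ThreadPoolExecutor fan-out with as_completed. Below is a minimal, self-contained sketch of the same pattern that runs without this repo; slow_generate is a hypothetical stand-in for the Monster API round trip (client.get_response followed by client.wait_and_get_result), and the model names are placeholders.

import random
import time
from concurrent.futures import ThreadPoolExecutor, as_completed

def slow_generate(model: str, prompt: str, temp: float) -> tuple:
    # Hypothetical stand-in for the API call + polling done in generate_model_output.
    time.sleep(random.uniform(0.1, 0.5))  # simulate network latency
    return model, f"[{model} @ temp={temp}] echo: {prompt}"

def fan_out(models: list, prompt: str, temp: float = 0.98) -> dict:
    outputs = {}
    with ThreadPoolExecutor() as executor:
        # Submit one task per model; the requests run concurrently in worker threads.
        futures = {executor.submit(slow_generate, m, prompt, temp): m for m in models}
        # as_completed yields futures in finish order, not submission order,
        # so each worker returns its model name alongside the output.
        for future in as_completed(futures):
            model, output = future.result()
            outputs[model] = output
    return outputs

print(fan_out(["model-a", "model-b", "model-c"], "Hello"))

Returning (model, output) from each worker is what lets the caller re-key results correctly even though completion order is arbitrary, which is why this commit changes generate_model_output to return a tuple rather than a bare string.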