research14 committed on
Commit
ec7540b
1 Parent(s): d4d5003

Added test code for app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -10
app.py CHANGED
@@ -1,16 +1,30 @@
1
  import gradio as gr
 
 
 
 
 
 
2
 
3
- from transformers import pipeline
 
4
 
5
- pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")
 
 
 
6
 
7
- def predict(text):
8
- return pipe(text)[0]["translation_text"]
9
-
10
- demo = gr.Interface(
11
- fn=predict,
12
- inputs='text',
13
- outputs='text',
 
 
 
 
14
  )
15
 
16
- demo.launch()
 
1
  import gradio as gr
2
+ import os
3
+ import json
4
+ import openai
5
+ import torch
6
+ from transformers import AutoTokenizer, AutoModelForCausalLM
7
+ from run_llm import model_mapping, fastchat # Import the necessary function from run_llm.py
8
 
9
# Read the OpenAI API key from the environment instead of hard-coding it.
# SECURITY: the previous revision embedded a live secret key in source control;
# that key is exposed and must be revoked/rotated immediately.
openai.api_key = os.environ.get("OPENAI_API_KEY", "")
11
 
12
def generate_text(input_text, model, prompt_type):
    """Generate output text by delegating to ``run_llm.fastchat``.

    Args:
        input_text: Raw text supplied by the user in the UI.
        model: Key selecting a backend model (a key of ``model_mapping``).
        prompt_type: Prompt variant selector (1 or 2), forwarded verbatim.

    Returns:
        Whatever ``fastchat`` produces for the given inputs.
    """
    # Thin wrapper: all model selection and prompting lives in run_llm.fastchat.
    return fastchat(input_text, model, prompt_type)
16
 
17
# Build the Gradio UI.
# FIX: the original passed "input_text"/"output_text" as the FIRST positional
# argument to gr.Textbox — in Gradio that positional is `value`, so the boxes
# were pre-filled with those literal placeholder strings. Use `label=` only.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(label="Input Text"),
        gr.Dropdown(
            # Model choices come from the mapping defined in run_llm.py.
            list(model_mapping.keys()),
            label="Model",
        ),
        # Prompt variant selector; forwarded unchanged to generate_text.
        gr.Radio([1, 2], label="Prompt Type"),
    ],
    outputs=gr.Textbox(label="Generated Text"),
)

# Start the web server (blocking call; module-level launch is the
# convention for Hugging Face Spaces app.py files).
iface.launch()