Create app.py
app.py
ADDED
@@ -0,0 +1,32 @@
+# Path: chatbot_model_based_autocorrect.py
+import os, getpass
+from langchain_openai import ChatOpenAI
+from langchain_core.messages import HumanMessage
+import gradio as gr
+
+os.environ["OPENAI_API_KEY"] = os.getenv("OPENAI_API_KEY") or getpass.getpass("OPENAI_API_KEY: ")
+# Define the language model
+model = ChatOpenAI(model="gpt-4o-mini")
+
+# Function to generate a conversational response with model-based autocorrection
+def chatbot_autocorrect_response(input_text: str):
+    # Step 1: Define the prompt asking the model to correct the sentence
+    prompt = (
+        f"The user said: '{input_text}'. Correct this sentence if necessary and rewrite it in a friendly, casual tone that sounds like natural American English, acknowledging the correction. If appropriate, phrase it at an IELTS 9.0 level. Respond only with the corrected sentence; if nothing needs to change, repeat the sentence unchanged."
+    )
+
+    # Step 2: Send the prompt to the model
+    human_message = HumanMessage(content=prompt)
+    response = model.invoke([human_message])
+
+    # Step 3: Return the model's reply text
+    return response.content
+
+# Gradio wrapper around the autocorrect function
+def gradio_chatbot(input_text):
+    # Pass the user's input to the chatbot function and return the response
+    return chatbot_autocorrect_response(input_text)
+
+# Launch the Gradio interface
+interface = gr.Interface(fn=gradio_chatbot, inputs="text", outputs="text", title="Chatbot with Auto-Correction")
+interface.launch()
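
A quick way to sanity-check the correction step without launching the UI is to call the function defined above directly (a minimal sketch; the sample sentence is illustrative and assumes OPENAI_API_KEY is available):

# Smoke test: the model should return a corrected, casual rephrasing
sample = "me and him goes to the store yesterday"
print(chatbot_autocorrect_response(sample))
# Expect something along the lines of "He and I went to the store yesterday." (exact wording will vary)

Because the prompt asks for only the corrected sentence, response.content can be returned as-is without stripping extra commentary. Note that on a Gradio Space the LangChain packages (langchain-openai, langchain-core) also need to be listed in the Space's requirements.txt for the app to start.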