# Path: chatbot_model_based_autocorrect.py
import os
import getpass

import gradio as gr
from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI

# Prompt for the OpenAI API key if it is not already set in the environment.
# ChatOpenAI reads OPENAI_API_KEY automatically, so no separate openai client
# configuration is needed.
if not os.getenv("OPENAI_API_KEY"):
    os.environ["OPENAI_API_KEY"] = getpass.getpass("Enter your OpenAI API key: ")

# Define the language model
model = ChatOpenAI(model="gpt-4o-mini")


# Function to generate a conversational response with model-based autocorrection
def chatbot_autocorrect_response(input_text: str) -> str:
    # Step 1: Build the prompt asking the model to correct the sentence
    prompt = (
        f"The user said: '{input_text}'. Correct this sentence if necessary, "
        "giving it a friendly, casual tone that sounds like a native American "
        "English speaker and acknowledges the correction. If appropriate, phrase "
        "it at an IELTS 9.0 level. Respond only with the corrected sentence; if "
        "nothing needs to change, repeat the sentence as is."
    )

    # Step 2: Send the prompt to the model
    human_message = HumanMessage(content=prompt)
    response = model.invoke([human_message])

    # Step 3: Return the model's reply
    return response.content


def gradio_chatbot(input_text):
    # Pass the user's input to the chatbot function and return the response
    return chatbot_autocorrect_response(input_text)


# Launch the Gradio interface
interface = gr.Interface(
    fn=gradio_chatbot,
    inputs="text",
    outputs="text",
    title="Chatbot with Auto-Correction",
)
interface.launch()
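
# A quick sanity check, kept as a sketch: the sample sentence below is only an
# illustrative input, not part of the original script. To try it, comment out
# interface.launch() above and run the file directly.
# print(chatbot_autocorrect_response("me and him goes to the store yesterday"))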