"""Gradio chatbot backed by a Hugging Face Hub model via LangChain.

Prompts for a Hugging Face Hub API token on startup, builds a simple
"think step by step" prompt chain, and serves it through a Gradio
text-in / text-out interface.
"""
from getpass import getpass
import os

import gradio as gr
from langchain import HuggingFaceHub, LLMChain, PromptTemplate
from langchain.agents import AgentType, initialize_agent  # NOTE: currently unused
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.memory import ConversationBufferMemory

# BUG FIX: the original also imported ``AgentChain`` from
# ``langchain.chains`` — no such class exists, so the script died with an
# ImportError before doing anything. The import is removed.

# Read the API token interactively so it never lands in the source file;
# a prompt string is supplied so the user knows what is being asked for.
HUGGINGFACEHUB_API_TOKEN = getpass("Hugging Face Hub API token: ")
os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN

# Prompt template; the single input variable is named "history" so it
# lines up with ConversationBufferMemory's default memory key.
template = """Question: {history} ------------------ Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["history"])

# BUG FIX: ConversationBufferMemory accepts no ``max_capacity`` keyword —
# passing it raised a TypeError. The object is kept for parity with the
# original script even though the LLMChain below does not attach it.
memory = ConversationBufferMemory()

# Callbacks support token-wise streaming (not wired into the chain yet).
callbacks = [StreamingStdOutCallbackHandler()]

# Instantiate the Hugging Face model.
repo_id = "google/flan-t5-xl"  # Replace with the desired model
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0, "max_length": 64})

# Tools for a future agent setup (currently unused).
tools = []

# Build the prompt -> LLM chain.
llm_chain = LLMChain(prompt=prompt, llm=llm)


def chatbot_interface(input_text):
    """Run one user message through the chain and return the model's reply."""
    return llm_chain.run(input_text)


# BUG FIX: ``gr.inputs`` / ``gr.outputs`` were removed in Gradio 3+;
# components are constructed directly from the top-level ``gr`` namespace.
gradio_app = gr.Interface(
    fn=chatbot_interface,
    inputs=gr.Textbox(label="Say something..."),
    outputs=gr.Textbox(),
    title="ConversationChain Chatbot",
    description="A chatbot interface powered by ConversationChain and Hugging Face.",
)

if __name__ == "__main__":
    # BUG FIX: ``gr.Interface`` has no ``run()`` method — the server is
    # started with ``launch()``.
    gradio_app.launch()