# Gradio + LangChain chatbot demo (Hugging Face Space).
from langchain import HuggingFaceHub, PromptTemplate, LLMChain
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import gradio as gr
from getpass import getpass  # NOTE(review): imported but unused in this file
import os  # NOTE(review): imported but unused in this file

# Prompt template: wraps the user's question and nudges the model toward a
# step-by-step ("chain of thought") answer.
template = """Question: {question}
------------------
Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["question"])

# Callback handler that streams generated tokens to stdout.
# NOTE(review): this list is never passed to the LLM or chain below, so
# streaming is not actually active — wire it in via `callbacks=callbacks`
# on HuggingFaceHub/LLMChain if streaming output is wanted.
callbacks = [StreamingStdOutCallbackHandler()]

# Instantiate the Hugging Face Hub model (requires HUGGINGFACEHUB_API_TOKEN
# in the environment for remote inference).
repo_id = "gpt2"  # Replace with the desired model
llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0, "max_length": 64})

# Chain that fills the prompt template and sends it to the model.
llm_chain = LLMChain(prompt=prompt, llm=llm)
def chatbot_interface(input_text):
    """Run the LLM chain on the user's question and return the model's answer.

    Args:
        input_text: The raw question string typed into the Gradio textbox.

    Returns:
        The text produced by ``llm_chain`` for the question.
    """
    response = llm_chain.run(input_text)
    return response
# Define the Gradio app.
# Uses the modern top-level components (gr.Textbox) instead of the
# gr.inputs/gr.outputs namespaces, which were deprecated in Gradio 3.x
# and removed in 4.x.
gradio_app = gr.Interface(
    fn=chatbot_interface,
    inputs=gr.Textbox(label="Say something..."),
    outputs=gr.Textbox(),
    title="ConversationChain Chatbot",
    description="A chatbot interface powered by ConversationChain and Hugging Face.",
)

# Launch the web UI only when executed as a script (not on import).
if __name__ == "__main__":
    gradio_app.launch()