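# Gradio chat demo: a LangChain conversational agent that answers questions
# about the author by querying a GPT Index wrapper around the "sethgodin"
# Pinecone index.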
import os
import gradio as gr
import pinecone
from gpt_index import GPTPineconeIndex
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain import OpenAI
from langchain.agents import initialize_agent
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY")
PINECONE_ENV = os.environ.get("PINECONE_ENV")

# The Pinecone client must be initialized before an index handle can be created.
pinecone.init(api_key=PINECONE_API_KEY, environment=PINECONE_ENV)
pindex = pinecone.Index("sethgodin")
# Wrap the existing Pinecone index so GPT Index can query it.
pinedex = GPTPineconeIndex([], pinecone_index=pindex)
tools = [
    Tool(
        name="GPT Index",
        func=lambda q: str(pinedex.query(q)),
        description=(
            "useful for when you want to answer questions about the author. "
            "The input to this tool should be a complete English sentence."
        ),
        return_direct=True,
    ),
]
memory = ConversationBufferMemory(memory_key="chat_history")
llm = OpenAI(temperature=0)
agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory)
def predict(input, history=[]):
    # Generate a response and append the new turn to the flat history list.
    output = agent_chain.run(input=input)
    history = history + [input, output]
    # gr.Chatbot renders a list of (user, bot) tuples, so pair up the flat list.
    response = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2)]
    return response, history
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
    # On submit, pass the text and accumulated state to predict, routing the
    # outputs to the chatbot display and back into the state.
    txt.submit(predict, [txt, state], [chatbot, state])

demo.launch()
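
# Usage sketch (assumes the legacy pinecone-client 2.x, gpt_index, and
# langchain 0.0.x APIs used above, and that this file is saved as app.py):
#   export OPENAI_API_KEY=... PINECONE_API_KEY=... PINECONE_ENV=...
#   python app.py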