Spaces:
Sleeping
Sleeping
File size: 1,665 Bytes
c618779 36ed299 7bb6784 360e7d2 91526e3 7bd5936 e26b4b7 d0304da 9acfdb2 5208eb9 480f01a 3003569 5208eb9 476f9ac 9acfdb2 0235a56 360e7d2 0235a56 b24ce13 9acfdb2 22ec340 0235a56 22ec340 0235a56 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 |
import os
import gradio as gr
import pinecone
from gpt_index import GPTIndexMemory, GPTPineconeIndex
from langchain.agents import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain import OpenAI
from langchain.agents import initialize_agent
# Required API keys from the environment; KeyError here fails fast at startup
# if either is unset.
OPENAI_API_KEY=os.environ["OPENAI_API_KEY"]
PINECONE_API_KEY=os.environ["PINECONE_API_KEY"]
# Connect to Pinecone and wrap the existing "sethgodin" index as a GPT Index
# vector store. The empty list means no new documents are inserted here —
# the index is assumed to be pre-populated.
pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")
pindex=pinecone.Index("sethgodin")
indexed_pinecone=GPTPineconeIndex([], pinecone_index=pindex)
# Single tool exposing the Pinecone-backed index to the agent.
# return_direct=True: the tool's answer is sent back to the user verbatim,
# without another LLM reasoning pass.
tools = [
    Tool(
        name = "GPT Index",
        func=lambda q: str(indexed_pinecone.query(q)),
        description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
        return_direct=True
    ),
]
# Conversation memory is backed by the same index; "compact" response mode
# keeps the retrieved chat-history context short.
memory = GPTIndexMemory(index=indexed_pinecone, memory_key="chat_history", query_kwargs={"response_mode": "compact"})
# temperature=0 for deterministic agent decisions.
llm=OpenAI(temperature=0)
agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory, verbose=True)
def predict(input, history=None):
    """Run one chat turn through the agent and extend the chat history.

    Args:
        input: The user's message (plain text). Note: shadows the builtin
            ``input``; name kept for backward compatibility with callers.
        history: Accumulated ``(user, bot)`` message pairs from previous
            turns; defaults to an empty history.

    Returns:
        A pair ``(chatbot_value, state_value)`` — Gradio wires the same
        updated history into both the Chatbot component and the State.
    """
    # None sentinel instead of a mutable default list (shared-default pitfall).
    if history is None:
        history = []
    response = agent_chain.run(input)
    # Build a new list rather than mutating the caller's state in place.
    history = history + [(input, response)]
    return history, history
# Minimal Gradio UI: a chatbot display, hidden state holding the (user, bot)
# history, and a textbox that submits on Enter.
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
    # On submit, run predict(text, history) and route the updated history to
    # both the chatbot display and the state component.
    txt.submit(predict, [txt, state], [chatbot, state])
# NOTE(review): removed a trailing " |" scraping artifact after launch() that
# made this line a syntax error in the captured source.
demo.launch()