Files changed (2) hide show
  1. app copy.py +70 -0
  2. app.py +19 -4
app copy.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import gradio as gr
3
+ import pinecone
4
+ from llama_index import GPTIndexMemory, GPTPineconeIndex, QuestionAnswerPrompt
5
+ from langchain.agents import Tool
6
+ from langchain.chains.conversation.memory import ConversationBufferMemory
7
+ from langchain import OpenAI
8
+ from langchain.agents import initialize_agent
9
+
10
# Credentials are read from the environment instead of being hard-coded.
# SECURITY: a previous revision embedded live OpenAI/Pinecone API keys in
# source control; those keys must be treated as compromised and rotated.
# Raises KeyError at startup if either variable is missing, which is
# preferable to silently running with a dead key.
OPENAI_API_KEY = os.environ["OPENAI_API_KEY"]
PINECONE_API_KEY = os.environ["PINECONE_API_KEY"]

# Connect to the Pinecone vector store holding the indexed textbook
# ("sejarah" is Malay for "history").
pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")

pindex = pinecone.Index("sejarah")
17
+
18
# Custom QuestionAnswerPrompt: clear, concise answering instructions scoped
# to the Malaysia Form 1 History textbook data. An example of the kind of
# open-ended query this prompt is tuned for:
#   "Bincangkan kesan sekiranya semangat assabiyah diamalkan dalam
#    masyarakat hari ini."
# (The unused `query_str` sample variable from the original was removed.)
#
# NOTE: the previous revision concatenated sentences without separating
# spaces ("point.For", "ethical.Please"), which garbled the prompt text the
# model actually receives; the spacing is fixed here.
QA_PROMPT_TMPL = (
    "Answer each question truthfully using the Malaysia's Form 1 History"
    " textbook data provided. "
    "Your answers should be concise and straight to the point. For questions"
    " that are open-ended, which require subjective judgment or opinion, you"
    " may not find a definitive answer in the textbook. "
    "However, you should still address the question's directive based on the"
    " data's context. Ideally, your answer should provide 3 points that"
    " support your response. You are encouraged to better provide positive"
    " suggestions for concepts that are less ethical. Please keep in mind"
    " that the scope of the data provided is limited to the content covered"
    " in the Malaysia's Form 1 History textbook. "
    "If you do not know the answer please say 'I don't know, Please rephrase the question'\n"
    "---------------------\n Context:\n"
    "{context_str}"
    "\n---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
)

QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)

# Wrap the existing Pinecone index; the empty list means no new documents
# are inserted here — we only query what is already indexed.
indexed_pinecone = GPTPineconeIndex([], pinecone_index=pindex)
38
+
39
# Single tool exposed to the agent: route the user's question straight to
# the Pinecone-backed index using the custom QA prompt. return_direct=True
# hands the index's answer to the user without further agent reasoning.
def _ask_index(question):
    return str(indexed_pinecone.query(question, text_qa_template=QA_PROMPT))


tools = [
    Tool(
        name="GPT Index",
        func=_ask_index,
        description="Asking questions to Pandai AI",
        return_direct=True,
    )
]

# Conversation memory backed by the same index; "compact" response mode
# keeps the summarized chat history short.
memory = GPTIndexMemory(
    index=indexed_pinecone,
    memory_key="chat_history",
    query_kwargs={"response_mode": "compact"},
)

llm = OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=1000)
agent_chain = initialize_agent(
    tools,
    llm,
    agent="conversational-react-description",
    memory=memory,
    verbose=True,
)
51
+
52
def predict(input, history=[]):
    """Run one chat turn through the agent and return the updated history.

    Parameters
    ----------
    input : str
        The user's message from the Gradio textbox.
    history : list
        Accumulated (user, bot) message pairs held in gr.State.

    Returns
    -------
    tuple
        ``(history, history)`` — the same updated list is sent to both the
        Chatbot display and the State component.
    """
    response = agent_chain.run(input)
    # Build a new list instead of mutating `history` in place, so the shared
    # mutable default [] is never modified across calls.
    history = history + [(input, response)]
    return history, history
59
+
60
# --- Gradio UI -------------------------------------------------------------
with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # renders the running conversation
    state = gr.State([])    # holds the (user, bot) history list

    with gr.Row():
        txt = gr.Textbox(
            show_label=False,
            placeholder="Enter text and press enter",
        ).style(container=False)

    # Pressing Enter runs predict(txt, state) and pushes the result to both
    # the chatbot display and the stored state.
    txt.submit(predict, [txt, state], [chatbot, state])

demo.launch()
app.py CHANGED
@@ -1,7 +1,7 @@
1
  import os
2
  import gradio as gr
3
  import pinecone
4
- from llama_index import GPTIndexMemory, GPTPineconeIndex
5
  from langchain.agents import Tool
6
  from langchain.chains.conversation.memory import ConversationBufferMemory
7
  from langchain import OpenAI
@@ -15,16 +15,31 @@ pinecone.init(api_key=PINECONE_API_KEY, environment="us-east1-gcp")
15
  pindex=pinecone.Index("sejarah")
16
  indexed_pinecone=GPTPineconeIndex([], pinecone_index=pindex)
17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  tools = [
19
  Tool(
20
  name = "GPT Index",
21
- func=lambda q: str(indexed_pinecone.query(q)),
22
- description="useful for when you want to answer questions about the author. The input to this tool should be a complete english sentence.",
23
  return_direct=True
24
  )
25
  ]
26
  memory = GPTIndexMemory(index=indexed_pinecone, memory_key="chat_history", query_kwargs={"response_mode": "compact"})
27
- llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo")
28
  agent_chain = initialize_agent(tools, llm, agent="conversational-react-description", memory=memory, verbose=True)
29
 
30
  def predict(input, history=[]):
 
1
  import os
2
  import gradio as gr
3
  import pinecone
4
+ from llama_index import GPTIndexMemory, GPTPineconeIndex,QuestionAnswerPrompt
5
  from langchain.agents import Tool
6
  from langchain.chains.conversation.memory import ConversationBufferMemory
7
  from langchain import OpenAI
 
15
  pindex=pinecone.Index("sejarah")
16
  indexed_pinecone=GPTPineconeIndex([], pinecone_index=pindex)
17
 
18
+
19
# Custom QuestionAnswerPrompt: clear, concise answering instructions scoped
# to the Malaysia Form 1 History textbook data.
#
# NOTE: the previous revision concatenated sentences without separating
# spaces ("point.For", "ethical.Please"), which garbled the prompt text the
# model actually receives; the spacing is fixed here.
QA_PROMPT_TMPL = (
    "Answer each question truthfully using the Malaysia's Form 1 History"
    " textbook data provided. "
    "Your answers should be concise and straight to the point. For questions"
    " that are open-ended, which require subjective judgment or opinion, you"
    " may not find a definitive answer in the textbook. "
    "However, you should still address the question's directive based on the"
    " data's context. Ideally, your answer should provide 3 points that"
    " support your response. You are encouraged to better provide positive"
    " suggestions for concepts that are less ethical. Please keep in mind"
    " that the scope of the data provided is limited to the content covered"
    " in the Malaysia's Form 1 History textbook. "
    "If you do not know the answer please say 'I don't know, Please rephrase the question'\n"
    "---------------------\n Context:\n"
    "{context_str}"
    "\n---------------------\n"
    "Given this information, please answer the question: {query_str}\n"
)

QA_PROMPT = QuestionAnswerPrompt(QA_PROMPT_TMPL)
32
+
33
# The agent's only tool: forward the question to the Pinecone-backed index
# with the custom QA prompt; return_direct=True sends the index's answer
# straight back to the user.
def _ask_index(question):
    return str(indexed_pinecone.query(question, text_qa_template=QA_PROMPT))


tools = [
    Tool(
        name="GPT Index",
        func=_ask_index,
        description="Asking questions to Pandai AI",
        return_direct=True,
    )
]

# Index-backed conversation memory; "compact" keeps the history summary small.
memory = GPTIndexMemory(
    index=indexed_pinecone,
    memory_key="chat_history",
    query_kwargs={"response_mode": "compact"},
)

llm = OpenAI(temperature=0, model_name="text-davinci-003", max_tokens=1000)
agent_chain = initialize_agent(
    tools,
    llm,
    agent="conversational-react-description",
    memory=memory,
    verbose=True,
)
44
 
45
  def predict(input, history=[]):