kookoobau committed on
Commit
ccd354a
1 Parent(s): 8d768b7
Files changed (1)
  1. app.py +6 -4
app.py CHANGED
@@ -1,10 +1,11 @@
 from langchain import PromptTemplate, LLMChain
 from langchain.llms import GPT4All
-from langchain.memory import ConversationMemory
+from langchain.memory import ConversationBufferMemory
+from langchain.chains import ConversationChain
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
-import gradio as gr
 from huggingface_hub import hf_hub_download
 from transformers import AutoTokenizer, AutoModelForCausalLM
+import gradio as gr
 
 model_path = hf_hub_download(repo_id="microsoft/DialoGPT-medium", filename="tf_model.h5")
 # Load the tokenizer and model
@@ -19,7 +20,7 @@ Answer: Let's think step by step."""
 prompt = PromptTemplate(template=template, input_variables=["question"])
 
 # Create a memory module with a maximum capacity of 1000 items
-memory = ConversationMemory(max_capacity=1000)
+memory = ConversationBufferMemory()
 # Callbacks support token-wise streaming
 callbacks = [StreamingStdOutCallbackHandler()]
 # Verbose is required to pass to the callback manager
@@ -30,7 +31,8 @@ llm_chain = LLMChain(prompt=prompt, llm=llm, memory=memory)
 # Define the Gradio interface
 def chatbot_interface(input_text):
     response = llm_chain.run(input_text)
-    memory.store(llm_chain.last_input, llm_chain.last_output)
+    memory.chat_memory.add_user_message(input_text)
+    memory.chat_memory.add_ai_message(response)
     return response
 
 # Define the Gradio app
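A note on the new memory wiring: ConversationBufferMemory.chat_memory is a plain message history, so add_user_message / add_ai_message are the standard classic-LangChain calls for appending turns by hand. A minimal standalone sketch of how the buffer accumulates (the example strings are illustrative; no model is needed):

from langchain.memory import ConversationBufferMemory

# Sketch: ConversationBufferMemory keeps an unbounded in-memory transcript.
# Unlike the removed ConversationMemory(max_capacity=1000), it has no size cap.
memory = ConversationBufferMemory()
memory.chat_memory.add_user_message("What is 2 + 2?")   # hypothetical user turn
memory.chat_memory.add_ai_message("2 + 2 equals 4.")    # hypothetical AI turn

# The accumulated transcript comes back under the "history" key.
print(memory.load_memory_variables({})["history"])
# Human: What is 2 + 2?
# AI: 2 + 2 equals 4.

One caveat worth flagging: a chain constructed with memory=memory (as llm_chain is here) normally calls memory.save_context() itself after each run, so the manual chat_memory.add_*() calls in chatbot_interface will likely record every turn a second time.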