kookoobau committed
Commit
43ab23b
1 Parent(s): c4be697
Files changed (1)
  1. app.py +0 -8
app.py CHANGED
@@ -1,7 +1,4 @@
 from langchain import HuggingFaceHub, PromptTemplate, LLMChain
-from langchain.memory import ConversationBufferMemory
-from langchain.agents import AgentType, initialize_agent
-from langchain.chains import AgentChain
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 import gradio as gr
 from getpass import getpass
@@ -17,17 +14,12 @@ Answer: Let's think step by step."""
 
 prompt = PromptTemplate(template=template, input_variables=["history"])
 
-# Create a memory module with a maximum capacity of 1000 items
-memory = ConversationBufferMemory(max_capacity=1000)
 # Callbacks support token-wise streaming
 callbacks = [StreamingStdOutCallbackHandler()]
 # Instantiate the Hugging Face model
 repo_id = "google/flan-t5-xl"  # Replace with the desired model
 llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0, "max_length": 64})
 
-# Define the tools
-tools = []
-
 # Initialize the chain
 llm_chain = LLMChain(prompt=prompt, llm=llm)
 
 
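After this commit, app.py builds a plain LLMChain with no memory or agent scaffolding. The deleted code would not have run as written: ConversationBufferMemory does not accept a max_capacity keyword, and AgentChain does not appear to be something langchain.chains exports. Below is a minimal sketch of how the surviving pieces could be exercised end to end; the prompt body and the gradio wiring are elided from this diff, so both are assumptions here, not the app's actual code.

import os
from getpass import getpass

import gradio as gr
from langchain import HuggingFaceHub, PromptTemplate, LLMChain

# HuggingFaceHub reads the API token from the environment; the file's
# getpass import suggests it is collected interactively.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = getpass("Hugging Face API token: ")

# Assumed template: only its last line and its "history" input variable
# are visible in the diff.
template = """{history}

Answer: Let's think step by step."""
prompt = PromptTemplate(template=template, input_variables=["history"])

llm = HuggingFaceHub(repo_id="google/flan-t5-xl",
                     model_kwargs={"temperature": 0, "max_length": 64})
llm_chain = LLMChain(prompt=prompt, llm=llm)

def answer(history):
    # run() accepts the single remaining template variable positionally
    return llm_chain.run(history)

# Assumed gradio wiring; the commit only shows the chain setup.
gr.Interface(fn=answer, inputs="text", outputs="text").launch()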