kookoobau committed on
Commit
c4be697
1 Parent(s): e8f4525
Files changed (1)
  1. app.py +10 -8
app.py CHANGED
@@ -1,12 +1,15 @@
-from langchain import HuggingFaceHub, PromptTemplate
+from langchain import HuggingFaceHub, PromptTemplate, LLMChain
 from langchain.memory import ConversationBufferMemory
 from langchain.agents import AgentType, initialize_agent
 from langchain.chains import AgentChain
 from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
 import gradio as gr
+from getpass import getpass
 import os
 
-os.environ["HUGGINGFACEHUB_API_TOKEN"] = "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"
+
+HUGGINGFACEHUB_API_TOKEN = getpass()
+os.environ["HUGGINGFACEHUB_API_TOKEN"] = HUGGINGFACEHUB_API_TOKEN
 
 template = """Question: {history}
 ------------------
@@ -19,19 +22,18 @@ memory = ConversationBufferMemory(max_capacity=1000)
 # Callbacks support token-wise streaming
 callbacks = [StreamingStdOutCallbackHandler()]
 # Instantiate the Hugging Face model
-llm = HuggingFaceHub(repo_id="gpt2")
+repo_id = "google/flan-t5-xl" # Replace with the desired model
+llm = HuggingFaceHub(repo_id=repo_id, model_kwargs={"temperature": 0, "max_length": 64})
 
 # Define the tools
 tools = []
 
-# Initialize the agent chain
-agent_chain = initialize_agent(tools, llm, agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, verbose=True, memory=memory)
-
-conversation = AgentChain(agent_chain, callbacks=callbacks, prompt=prompt)
+# Initialize the chain
+llm_chain = LLMChain(prompt=prompt, llm=llm)
 
 # Define the Gradio interface
 def chatbot_interface(input_text):
-    response = conversation.predict(input_text)
+    response = llm_chain.run(input_text)
     return response
 
 # Define the Gradio app
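
The commit replaces the hardcoded token with an interactive getpass() prompt. When the runtime supplies the token instead (for example as a Space secret or an exported shell variable), a non-interactive variant would read it straight from the environment; the sketch below assumes such a setup and is not part of this commit.

import os

# Sketch, not part of the commit: assume HUGGINGFACEHUB_API_TOKEN is already
# present in the environment (e.g. set as a Space secret), so no prompt is needed.
token = os.environ.get("HUGGINGFACEHUB_API_TOKEN")
if not token:
    raise RuntimeError("HUGGINGFACEHUB_API_TOKEN is not set")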
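
The diff ends at the "# Define the Gradio app" comment, so the interface wiring and launch() call sit below the shown hunk. A minimal, self-contained sketch of what that trailing section plausibly looks like; the stub handler, labels, and title here are illustrative assumptions, and in app.py the real chatbot_interface defined above would be passed to gr.Interface.

import gradio as gr

def chatbot_interface(input_text):
    # Stand-in for the handler defined earlier in app.py,
    # which returns llm_chain.run(input_text)
    return f"(echo) {input_text}"

# Hypothetical continuation below "# Define the Gradio app"
iface = gr.Interface(
    fn=chatbot_interface,
    inputs=gr.Textbox(label="Question"),
    outputs=gr.Textbox(label="Answer"),
    title="LangChain + Hugging Face Hub chatbot",
)

iface.launch()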