kookoobau committed on
Commit
03813a3
1 Parent(s): 9c714c1
Files changed (2)
  1. app.py +5 -10
  2. requirements.txt +1 -3
app.py CHANGED
@@ -1,18 +1,13 @@
+ from transformers import GPT2Tokenizer, GPT2LMHeadModel
  from langchain import PromptTemplate, LLMChain
- from langchain.llms import GPT4All
  from langchain.memory import ConversationBufferMemory
  from langchain.chains import ConversationChain
  from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
- from huggingface_hub import hf_hub_download
- from transformers import AutoTokenizer, AutoModelForCausalLM
  import gradio as gr

- # model_path = hf_hub_download(repo_id="microsoft/DialoGPT-medium", filename="pytorch_model.bin")
  # Load the tokenizer and model
- tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
-
- model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
-
+ tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
+ model = GPT2LMHeadModel.from_pretrained("gpt2")

  template = """Question: {question}
  ------------------
@@ -24,8 +19,8 @@ prompt = PromptTemplate(template=template, input_variables=["question"])
  memory = ConversationBufferMemory()
  # Callbacks support token-wise streaming
  callbacks = [StreamingStdOutCallbackHandler()]
- # Verbose is required to pass to the callback manager
- llm = GPT4All(model=model, callbacks=callbacks, verbose=True)
+ # Instantiate the LLMChain with the model and tokenizer
+ llm = LLMChain(model=model, tokenizer=tokenizer, callbacks=callbacks, verbose=True)

  conversation = ConversationChain(llm=llm, memory=memory, callbacks=callbacks, prompt=prompt)
 
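Note on this change: in langchain 0.0.161, LLMChain takes an `llm` and a `prompt`, not `model`/`tokenizer` keyword arguments, so the new `llm = LLMChain(...)` line will fail validation at runtime. A minimal sketch of the likely intended wiring, using the transformers text-generation pipeline and LangChain's HuggingFacePipeline wrapper (the `pipe` variable and the `max_new_tokens` value are assumptions, not part of the commit):

from transformers import GPT2Tokenizer, GPT2LMHeadModel, pipeline
from langchain.llms import HuggingFacePipeline

# Load the tokenizer and model, as in the commit
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Wrap them in a transformers text-generation pipeline
pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=64)

# HuggingFacePipeline adapts the pipeline to LangChain's LLM interface,
# so the result can be passed to ConversationChain as `llm`
llm = HuggingFacePipeline(pipeline=pipe)

With that, the existing `conversation = ConversationChain(llm=llm, memory=memory, callbacks=callbacks, prompt=prompt)` line can stay as-is, though ConversationChain validates its prompt against `history` and `input` variables, so the `question`-only template may also need adjusting.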
requirements.txt CHANGED
@@ -1,5 +1,3 @@
  langchain==0.0.161
  gradio==2.3.0
- transformers
- torch
- pygpt4all
+ transformers
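Note on the dependency change: `GPT2LMHeadModel.from_pretrained` still needs a deep-learning backend such as PyTorch at runtime, so dropping `torch` will likely break model loading unless the Space's base image already provides it. A hedged sketch of requirements.txt with the backend kept (versions left unpinned, as in the commit):

langchain==0.0.161
gradio==2.3.0
transformers
torch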