sarves committed on
Commit
39e0d83
1 Parent(s): e997d74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -4
app.py CHANGED
@@ -2,12 +2,15 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import gradio as gr
3
  import torch
4
 
5
- title = "IEEE CIS Summer School - 2023"
 
6
  description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
7
  examples = [["How are you?"]]
8
 
9
- tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill", padding_side='left') # Set padding_side='left'
10
- model = AutoModelForCausalLM.from_pretrained("facebook/blenderbot-400M-distill")
 
 
11
 
12
  def predict(input, history=[]):
13
  # tokenize the new input sentence
@@ -24,7 +27,7 @@ def predict(input, history=[]):
24
  ).tolist()
25
 
26
  # convert the tokens to text, and then split the responses into lines
27
- response = tokenizer.decode(history[0]).split("")
28
  # print('decoded_response-->>'+str(response))
29
  response = [
30
  (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
@@ -32,6 +35,7 @@ def predict(input, history=[]):
32
  # print('response-->>'+str(response))
33
  return response, history
34
 
 
35
  gr.Interface(
36
  fn=predict,
37
  title=title,
 
2
  import gradio as gr
3
  import torch
4
 
5
+
6
+ title = "IEEE Summer School"
7
  description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
8
  examples = [["How are you?"]]
9
 
10
+
11
+ tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
12
+ model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
13
+
14
 
15
  def predict(input, history=[]):
16
  # tokenize the new input sentence
 
27
  ).tolist()
28
 
29
  # convert the tokens to text, and then split the responses into lines
30
+ response = tokenizer.decode(history[0]).split("<|endoftext|>")
31
  # print('decoded_response-->>'+str(response))
32
  response = [
33
  (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
 
35
  # print('response-->>'+str(response))
36
  return response, history
37
 
38
+
39
  gr.Interface(
40
  fn=predict,
41
  title=title,