sarves committed
Commit e997d74
1 Parent(s): 394f51e

Update app.py

Files changed (1): app.py +2 -8
app.py CHANGED
@@ -1,19 +1,14 @@
-
-
 from transformers import AutoModelForCausalLM, AutoTokenizer
 import gradio as gr
 import torch
 
-
 title = "IEEE CIS Summer School - 2023"
 description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
 examples = [["How are you?"]]
 
-
-tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill")
+tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot-400M-distill", padding_side='left') # Set padding_side='left'
 model = AutoModelForCausalLM.from_pretrained("facebook/blenderbot-400M-distill")
 
-
 def predict(input, history=[]):
     # tokenize the new input sentence
     new_user_input_ids = tokenizer.encode(
@@ -29,7 +24,7 @@ def predict(input, history=[]):
     ).tolist()
 
     # convert the tokens to text, and then split the responses into lines
-    response = tokenizer.decode(history[0]).split("<|endoftext|>")
+    response = tokenizer.decode(history[0]).split("</s>")
     # print('decoded_response-->>'+str(response))
     response = [
         (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
@@ -37,7 +32,6 @@ def predict(input, history=[]):
     # print('response-->>'+str(response))
     return response, history
 
-
 gr.Interface(
     fn=predict,
     title=title,
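Beyond removing blank lines, the commit makes two substantive changes: the tokenizer is created with padding_side='left', and the decoded chat history is split on BlenderBot's EOS token "</s>" rather than DialoGPT's "<|endoftext|>". A minimal sketch of why both matter, standalone and illustrative rather than the Space's exact code (the example history string and the printed outputs are assumptions):

```python
# Minimal sketch (illustrative, not the Space's exact code) of the two changes.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "facebook/blenderbot-400M-distill", padding_side="left"
)

# Left padding keeps the real tokens at the end of each sequence, so a
# generation model continues from the prompt instead of from pad tokens.
batch = tokenizer(["How are you?", "Hi"], padding=True, return_tensors="pt")
print(batch["input_ids"][1])  # pad ids appear on the left of the shorter prompt

# BlenderBot marks turn boundaries with its EOS token "</s>", not DialoGPT's
# "<|endoftext|>", so the decoded history must be split on that token.
history_text = "How are you?</s> I am fine, thanks.</s>"  # assumed example
turns = history_text.split(tokenizer.eos_token)
pairs = [(turns[i], turns[i + 1]) for i in range(0, len(turns) - 1, 2)]
print(pairs)  # [('How are you?', ' I am fine, thanks.')]
```

The pairing comprehension is the same one predict() uses: splitting on the EOS token yields alternating user/bot turns, which are zipped into (user, bot) tuples for Gradio's chatbot display.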