Spaces: Build error
Commit f315fcf
Parent(s):
Duplicate from gorkemgoknar/moviechatbot
Co-authored-by: Gorkem Goknar <[email protected]>
- .gitattributes +27 -0
- README.md +38 -0
- app.py +148 -0
- requirements.txt +2 -0
.gitattributes
ADDED
@@ -0,0 +1,27 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bin.* filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zstandard filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
README.md
ADDED
@@ -0,0 +1,38 @@
+---
+title: Moviechatbot - GPT chatbot
+emoji: 👀
+colorFrom: blue
+colorTo: indigo
+sdk: gradio
+app_file: app.py
+pinned: false
+duplicated_from: gorkemgoknar/moviechatbot
+---
+
+# Configuration
+
+`title`: _string_
+PoC with no model first
+
+`emoji`: _string_
+Space emoji (emoji-only character allowed)
+
+`colorFrom`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`colorTo`: _string_
+Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+`sdk`: _string_
+Can be either `gradio` or `streamlit`
+
+`sdk_version`: _string_
+Only applicable for `streamlit` SDK.
+See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+`app_file`: _string_
+Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+Path is relative to the root of the repository.
+
+`pinned`: _boolean_
+Whether the Space stays on top of your list.
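For illustration only: the keys documented above combine into a front matter like the one at the top of this README. A hypothetical variant for a Streamlit Space (every value below, including the `sdk_version` number, is a placeholder and not taken from this repository) might look like:

---
title: Example Streamlit Space
emoji: 🚀
colorFrom: green
colorTo: blue
sdk: streamlit
sdk_version: 1.2.0
app_file: app.py
pinned: false
---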
app.py
ADDED
@@ -0,0 +1,148 @@
+import gradio as gr
+import random
+import torch
+from transformers import AutoConfig
+from transformers import GPT2Tokenizer, GPT2LMHeadModel
+from itertools import chain
+
+
+config = AutoConfig.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
+model = GPT2LMHeadModel.from_pretrained('gorkemgoknar/gpt2chatbotenglish', config=config)
+
+tokenizer = GPT2Tokenizer.from_pretrained('gorkemgoknar/gpt2chatbotenglish')
+tokenizer.model_max_length = 1024
+
+# Dynamic temperature
+# See experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%25C3%25B6rkem-g%25C3%25B6knar
+
+base_temperature = 1.3
+dynamic_temperature_range = 0.15
+
+rand_range = random.uniform(-1 * dynamic_temperature_range, dynamic_temperature_range)
+temperature = base_temperature + rand_range
+
+SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
+
+# See document for experiment https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/
+
+def get_chat_response(name, history=[], input_txt="Hello, what is your name?"):
+
+    ai_history = history.copy()
+
+    #ai_history.append(input_txt)
+    # history is stored as (user message, bot response) pairs; flatten to alternating utterances before encoding
+    ai_history_e = [tokenizer.encode(text) for pair in ai_history for text in pair]
+
+    personality = "My name is " + name
+
+    bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
+
+    # persona first, history next, input text must be at the end
+    # [[bos, persona], [history], [input]]
+    sequence = [[bos] + tokenizer.encode(personality)] + ai_history_e + [tokenizer.encode(input_txt)]
+    # [[bos, persona], [speaker1 ..., speaker2 ..., speaker1 ..., speaker2 ...], [input]]
+    sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s for i, s in enumerate(sequence[1:])]
+
+    sequence = list(chain(*sequence))
+
+    #bot_input_ids = tokenizer.encode(personality + tokenizer.eos_token + input_txt + tokenizer.eos_token, return_tensors='pt')
+    sequence_len = len(sequence)
+
+    # optimum response and speed
+    chat_history_ids = model.generate(
+        torch.tensor(sequence).unsqueeze(0), max_length=50,
+        pad_token_id=tokenizer.eos_token_id,
+        no_repeat_ngram_size=3,
+        do_sample=True,
+        top_k=60,
+        top_p=0.8,
+        temperature=temperature  # use the dynamic temperature computed above
+    )
+    out_str = tokenizer.decode(chat_history_ids[0][sequence_len:], skip_special_tokens=True)
+    #out_str = tokenizer.decode(chat_history_ids[:, sequence.shape[-1]:][0], skip_special_tokens=False)
+    return out_str
+
+## You can use anyone from below:
+'''
+| Macleod | Moran | Brenda | Ramirez | Peter Parker | Quentin Beck | Andy
+| Red | Norton | Willard | Chief | Chef | Kilgore | Kurtz | Westley | Buttercup
+| Vizzini | Fezzik | Inigo | Man In Black | Taylor | Zira | Zaius | Cornelius
+| Bud | Lindsey | Hippy | Erin | Ed | George | Donna | Trinity | Agent Smith
+| Morpheus | Neo | Tank | Meryl | Truman | Marlon | Christof | Stromboli | Bumstead
+| Schreber | Walker | Korben | Cornelius | Loc Rhod | Anakin | Obi-Wan | Palpatine
+| Padme | Superman | Luthor | Dude | Walter | Donny | Maude | General | Starkiller
+| Indiana | Willie | Short Round | John | Sarah | Terminator | Miller | Sarge | Reiben
+| Jackson | Upham | Chuckie | Will | Lambeau | Sean | Skylar | Saavik | Spock
+| Kirk | Bones | Khan | Kirk | Spock | Sybok | Scotty | Bourne | Pamela | Abbott
+| Nicky | Marshall | Korshunov | Troy | Vig | Archie Gates | Doc | Interrogator
+| Ellie | Ted | Peter | Drumlin | Joss | Macready | Childs | Nicholas | Conrad
+| Feingold | Christine | Adam | Barbara | Delia | Lydia | Cathy | Charles | Otho
+| Schaefer | Han | Luke | Leia | Threepio | Vader | Yoda | Lando | Elaine | Striker
+| Dr. Rumack | Kramer | David | Saavik | Kirk | Kruge | Holden | Deckard | Rachael
+| Batty | Sebastian | Sam | Frodo | Pippin | Gandalf | Kay | Edwards | Laurel
+| Edgar | Zed | Jay | Malloy | Plissken | Steve Rogers | Tony Stark | Scott Lang
+| Bruce Banner | Bruce | Edward | Two-Face | Batman | Chase | Alfred | Dick
+| Riddler | Din Djarin | Greef Karga | Kuiil | Ig-11 | Cara Dune | Peli Motto
+| Toro Calican | Ripley | Meredith | Dickie | Marge | Peter | Lambert | Kane
+| Dallas | Ripley | Ash | Parker | Threepio | Luke | Leia | Ben | Han | Common Bob
+| Common Alice | Jack | Tyler | Marla | Dana | Stantz | Venkman | Spengler | Louis
+| Fry | Johns | Riddick | Kirk | Decker | Spock | Ilia | Indy | Belloq | Marion
+| Brother | Allnut | Rose | Qui-Gon | Jar Jar
+'''
+
+
+def greet(character, message, history):
+
+    # gradio's set_state/get_state had problems on embedded html!
+    history = history or {"character": character, "message_history": []}
+    # gradio's set_state/get_state does not persist the session; for now using a simple state dict
+    #global history
+
+    if history["character"] != character:
+        # switching character resets the conversation
+        history = {"character": character, "message_history": []}
+
+    response = get_chat_response(character, history=history["message_history"], input_txt=message)
+
+    history["message_history"].append((message, response))
+
+    # render the conversation as simple HTML for the output widget
+    html = "<div class='chatbot'>"
+    for user_msg, resp_msg in history["message_history"]:
+        html += f"<div class='user_msg'>You: {user_msg}</div>"
+        html += f"<div class='resp_msg'>{character}: {resp_msg}</div>"
+    html += "</div>"
+
+    return html, history
+
+
+personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo", "Spock", "Vader", "Indy"]
+
+examples = ["Gandalf", "What is your name?"]
+
+css = """
+.chatbox {display:flex;flex-direction:column}
+.user_msg, .resp_msg {padding:4px;margin-bottom:4px;border-radius:4px;width:80%}
+.user_msg {background-color:cornflowerblue;color:white;align-self:start}
+.resp_msg {background-color:lightgray;align-self:self-end}
+"""
+
+
+# some selected characters are included for demo use
+personality_choices = ["Gandalf", "Riddick", "Macleod", "Morpheus", "Neo", "Spock", "Vader", "Indy", "Ig-11", "Threepio", "Tony Stark", "Batman", "Vizzini"]
+title = "Metayazar - Movie Chatbot"
+description = "Chat with your favorite movie characters. This Space demo has a simple interface and simple history (gradio's state did not persist, so a plain history dict is used), and responses will vary since the same gradio machine is shared. Try metayazar.com/chatbot for more movie/character options and memorized history."
+article = "<p style='text-align: center'><a href='https://www.linkedin.com/pulse/ai-goes-job-interview-g%C3%B6rkem-g%C3%B6knar/' target='_blank'>AI Goes to Job Interview</a> | <a href='https://www.metayazar.com/' target='_blank'>Metayazar AI Writer</a> | <a href='https://www.linkedin.com/in/goknar/' target='_blank'>Görkem Göknar</a></p>"
+
+# History is not fully implemented in this demo; use metayazar.com/chatbot for a movie and character dropdown chat interface
+##interface = gr.Interface(fn=greet, inputs=[gr.inputs.Dropdown(personality_choices), "text"], title=title, description=description, outputs="text")
+
+history = {"character": "None", "message_history": []}
+interface = gr.Interface(fn=greet, inputs=[gr.inputs.Dropdown(personality_choices), "text", "state"], outputs=["html", "state"], css=css, title=title, description=description, article=article)
+
+
+if __name__ == "__main__":
+    interface.launch()
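If you want to sanity-check the model locally without the Gradio interface, a minimal sketch along the lines of app.py might look like the following. This is an illustrative test script, not part of the Space: it assumes the gorkemgoknar/gpt2chatbotenglish checkpoint downloads successfully and exposes the same special tokens used above, and the names `MODEL_ID` and `reply` are introduced here for the example.

```python
# Minimal local smoke test for the chatbot model, outside the Gradio UI (a sketch, not part of the Space).
# Assumes the same checkpoint and special-token layout that app.py uses above.
import torch
from transformers import AutoConfig, GPT2Tokenizer, GPT2LMHeadModel

MODEL_ID = "gorkemgoknar/gpt2chatbotenglish"
SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]

config = AutoConfig.from_pretrained(MODEL_ID)
model = GPT2LMHeadModel.from_pretrained(MODEL_ID, config=config)
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_ID)

bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])


def reply(character, user_text):
    # Persona first, then the user utterance tagged with a speaker token,
    # mirroring the single-turn sequence layout in get_chat_response.
    persona = [bos] + tokenizer.encode("My name is " + character)
    user = [speaker1] + tokenizer.encode(user_text)
    sequence = persona + user
    output = model.generate(
        torch.tensor([sequence]),
        max_length=len(sequence) + 40,  # leave room for the reply
        pad_token_id=tokenizer.eos_token_id,
        no_repeat_ngram_size=3,
        do_sample=True,
        top_k=60,
        top_p=0.8,
        temperature=1.3,
    )
    # Decode only the newly generated tokens.
    return tokenizer.decode(output[0][len(sequence):], skip_special_tokens=True)


if __name__ == "__main__":
    print(reply("Gandalf", "What is your name?"))
```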
requirements.txt
ADDED
@@ -0,0 +1,2 @@
+torch
+transformers