paloma99 committed on
Commit
6f8418a
1 Parent(s): 87bd002

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -39
app.py CHANGED
@@ -1,47 +1,46 @@
1
- from transformers import AutoModelForCausalLM, AutoTokenizer
2
  import gradio as gr
3
- import torch
4
 
 
5
 
6
- title = "????AI ChatBot"
7
- description = "A State-of-the-Art Large-scale Pretrained Response generation model (DialoGPT)"
8
- examples = [["How are you?"]]
9
 
 
 
 
 
 
 
10
 
11
- tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-large")
12
- model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-large")
13
-
 
14
 
15
- def predict(input, history=[]):
16
- # tokenize the new input sentence
17
- new_user_input_ids = tokenizer.encode(
18
- input + tokenizer.eos_token, return_tensors="pt"
19
- )
20
 
21
- # append the new user input tokens to the chat history
 
22
  bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
23
-
24
- # generate a response
25
- history = model.generate(
26
- bot_input_ids, max_length=4000, pad_token_id=tokenizer.eos_token_id
27
- ).tolist()
28
-
29
- # convert the tokens to text, and then split the responses into lines
30
- response = tokenizer.decode(history[0]).split("<|endoftext|>")
31
- # print('decoded_response-->>'+str(response))
32
- response = [
33
- (response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)
34
- ] # convert to tuples of list
35
- # print('response-->>'+str(response))
36
- return response, history
37
-
38
-
39
- gr.Interface(
40
- fn=predict,
41
- title=title,
42
- description=description,
43
- examples=examples,
44
- inputs=["text", "state"],
45
- outputs=["chatbot", "state"],
46
- theme="finlaymacklon/boxy_violet",
47
- ).launch()
 
1
# Cell 1: Image Classification Model
import gradio as gr
from transformers import pipeline

# Hot-dog / not-hot-dog classifier, loaded once at module import so every
# request reuses the same model instance.
image_pipeline = pipeline(
    task="image-classification",
    model="julien-c/hotdog-not-hotdog",
)
6
 
7
def predict_image(input_img):
    """Classify *input_img* and echo it back.

    Returns the unmodified input image (for the "Processed Image" output)
    together with a label -> score mapping consumed by ``gr.Label``.
    """
    scores = {}
    for prediction in image_pipeline(input_img):
        scores[prediction["label"]] = prediction["score"]
    return input_img, scores
10
 
11
# Wire the classifier into a simple image-in / (image, label)-out interface.
image_gradio_app = gr.Interface(
    predict_image,
    inputs=gr.Image(
        label="Select hot dog candidate",
        sources=['upload', 'webcam'],
        type="pil",
    ),
    outputs=[
        gr.Image(label="Processed Image"),
        gr.Label(label="Result", num_top_classes=2),
    ],
    title="Hot Dog? Or Not?",
)
17
 
18
# Cell 2: Chatbot Model
import gradio as gr  # re-import kept from the original notebook-cell layout
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# DialoGPT-medium: Microsoft's conversational causal language model.
# Tokenizer and weights are fetched once at import time.
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-medium")
chatbot_model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-medium")
 
 
 
25
 
26
def predict_chatbot(input, history=None):
    """Generate the next DialoGPT reply for a chat turn.

    Parameters
    ----------
    input : str
        The new user message from the textbox.
    history : list | None
        Token-id conversation history carried in ``gr.State``; ``None``
        (first turn) means an empty history.

    Returns
    -------
    tuple
        ``(pairs, history)`` where ``pairs`` is a list of
        ``(user, bot)`` message tuples for ``gr.Chatbot`` and ``history``
        is the updated token-id list for ``gr.State``.
    """
    # A mutable default ([]) would be shared across calls and leak one
    # user's conversation into the next — use a None sentinel instead.
    if history is None:
        history = []
    # Encode the user turn, terminated by the EOS token DialoGPT expects.
    new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')
    # Prepend the accumulated history so the model sees the whole dialogue.
    bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)
    history = chatbot_model.generate(
        bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id
    ).tolist()
    # str.split("") raises ValueError (empty separator); the original code
    # split on DialoGPT's "<|endoftext|>" marker, which the diff rendering
    # swallowed as an HTML tag — restored here.
    response = tokenizer.decode(history[0]).split("<|endoftext|>")
    # Pair alternating user/bot turns for the Chatbot component.
    response_tuples = [(response[i], response[i + 1]) for i in range(0, len(response) - 1, 2)]
    return response_tuples, history
34
+
35
# Chat UI: a Chatbot transcript, hidden token-id state, and a text input.
chatbot_gradio_app = gr.Blocks()
with chatbot_gradio_app as demo:
    chatbot = gr.Chatbot()
    state = gr.State([])
    with gr.Row():
        # Component.style() was removed in Gradio 4.x (which the
        # gr.Image(sources=...) call above already requires); container=False
        # is now a constructor argument of gr.Textbox.
        txt = gr.Textbox(
            show_label=False,
            placeholder="Enter text and press enter",
            container=False,
        )
    # Submitting the textbox runs one chat turn and refreshes both the
    # visible transcript and the hidden history state.
    txt.submit(predict_chatbot, [txt, state], [chatbot, state])
42
+
43
# Launch the interfaces
if __name__ == "__main__":
    # launch() blocks by default, so the second app would never start.
    # prevent_thread_lock returns immediately and keeps the image app
    # serving while the chatbot app launches (on its own port).
    image_gradio_app.launch(prevent_thread_lock=True)
    chatbot_gradio_app.launch()