0xdant committed on
Commit c4d2e43
1 Parent(s): 57a4d9e

Update app.py

Files changed (1)
  1. app.py +33 -2
app.py CHANGED
@@ -1,3 +1,34 @@
-import gradio as gr
+import requests
+import torch
+from PIL import Image
+from transformers import MllamaForConditionalGeneration, AutoProcessor
 
-gr.load("models/facebook/blenderbot-3B").launch()
+
+model_id = "meta-llama/Llama-3.2-11B-Vision-Instruct"
+
+model = MllamaForConditionalGeneration.from_pretrained(
+    model_id,
+    torch_dtype=torch.bfloat16,
+    device_map="auto",
+)
+processor = AutoProcessor.from_pretrained(model_id)
+
+url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"
+image = Image.open(requests.get(url, stream=True).raw)
+
+messages = [
+    {"role": "user", "content": [
+        {"type": "image"},
+        {"type": "text", "text": "If I had to write a haiku for this one, it would be: "}
+    ]}
+]
+input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
+inputs = processor(
+    image,
+    input_text,
+    add_special_tokens=False,
+    return_tensors="pt"
+).to(model.device)
+
+output = model.generate(**inputs, max_new_tokens=30)
+print(processor.decode(output[0]))
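Note (not part of this commit): the change removes the gr.load(...).launch() call, so app.py now only prints the decoded output, prompt and special tokens included, and no longer serves a UI. Below is a minimal sketch of how the new model and processor objects defined above could be wrapped back into a Gradio interface; the describe function, the 64-token limit, the prompt-trimming decode, and the widget labels are illustrative assumptions, not the author's code.

import gradio as gr

def describe(image, prompt):
    # Hypothetical helper (not in the commit): run one image + text prompt
    # through the chat template and return only the newly generated text.
    messages = [{"role": "user", "content": [
        {"type": "image"},
        {"type": "text", "text": prompt},
    ]}]
    text = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(image, text, add_special_tokens=False, return_tensors="pt").to(model.device)
    output = model.generate(**inputs, max_new_tokens=64)
    # Drop the prompt tokens and skip special tokens when decoding.
    generated = output[0][inputs["input_ids"].shape[-1]:]
    return processor.decode(generated, skip_special_tokens=True)

demo = gr.Interface(
    fn=describe,
    inputs=[gr.Image(type="pil"), gr.Textbox(label="Prompt")],
    outputs=gr.Textbox(label="Model output"),
)
demo.launch()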