update 196-version imp
test.py
DELETED
@@ -1,28 +0,0 @@
-import torch
-from transformers import AutoModelForCausalLM, AutoTokenizer
-from PIL import Image
-
-
-torch.set_default_device("cuda")
-
-# Create model
-model = AutoModelForCausalLM.from_pretrained(
-    "/data2/ouyangxc/upload/Imp-v1-3b-196",
-    torch_dtype=torch.float16,
-    device_map="auto",
-    trust_remote_code=True)
-tokenizer = AutoTokenizer.from_pretrained("/data2/ouyangxc/upload/Imp-v1-3b-196", trust_remote_code=True)
-# Set inputs
-text = "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nProvide the bounding box coordinate of the football in this image. ASSISTANT:"
-image = Image.open("/data2/ouyangxc/imp-v1-3b_196/images/bird.jpg")
-
-input_ids = tokenizer(text, return_tensors='pt').input_ids
-image_tensor = model.image_preprocess(image)
-
-# Generate the answer
-output_ids = model.generate(
-    input_ids,
-    max_new_tokens=150,
-    images=image_tensor,
-    use_cache=True)[0]
-print('crop', tokenizer.decode(output_ids[input_ids.shape[1]:], skip_special_tokens=True).strip())
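For reference, below is a minimal sketch of the inference flow the removed test.py exercised, kept close to the deleted script. It assumes the checkpoint's remote code exposes `image_preprocess` and accepts an `images=` argument to `generate`, as the deleted file did; the model path, image path, and prompt text here are hypothetical placeholders, not part of this commit.

```python
import torch
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

torch.set_default_device("cuda")

# Placeholder checkpoint path; substitute the local 196-version Imp checkpoint.
model_path = "path/to/Imp-v1-3b-196"

# Load model and tokenizer; trust_remote_code is required for the Imp model class.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    torch_dtype=torch.float16,
    device_map="auto",
    trust_remote_code=True,
)
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)

# Conversation template with an <image> placeholder, as in the removed script.
prompt = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions. "
    "USER: <image>\nDescribe this image. ASSISTANT:"
)
image = Image.open("example.jpg")  # placeholder image path

# Tokenize the prompt and preprocess the image via the model's remote code.
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
image_tensor = model.image_preprocess(image)

# Generate and decode only the newly produced tokens.
output_ids = model.generate(
    input_ids,
    max_new_tokens=150,
    images=image_tensor,
    use_cache=True,
)[0]
print(tokenizer.decode(output_ids[input_ids.shape[1]:], skip_special_tokens=True).strip())
```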