import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

# Load the int4-quantized MiniCPM-Llama3-V 2.5 checkpoint and its tokenizer.
# trust_remote_code=True is required because the repo ships custom modeling code;
# the int4 weights load via bitsandbytes and need roughly 9 GB of GPU memory.
model = AutoModel.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-Llama3-V-2_5-int4', trust_remote_code=True)
model.eval()

# Open the input image and build a single-turn chat history.
image = Image.open('xx.jpg').convert('RGB')
question = 'What is in the image?'
msgs = [{'role': 'user', 'content': question}]

# Single-pass generation: model.chat returns the complete response string.
res = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,  # if sampling=False, beam search decoding is used instead
    temperature=0.7,
    # system_prompt='' # pass a system prompt here if needed
)
print(res)

# Streaming generation: with stream=True (and sampling=True), model.chat
# returns a generator that yields the response text incrementally.
res = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7,
    stream=True
)

# Accumulate the streamed chunks while echoing them to stdout as they arrive.
generated_text = ""
for new_text in res:
    generated_text += new_text
    print(new_text, flush=True, end='')
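
# A minimal multi-turn sketch. Assumption: model.chat accepts earlier assistant
# turns appended to msgs, as in the upstream MiniCPM-V examples; the follow-up
# question below is purely illustrative.
msgs.append({'role': 'assistant', 'content': generated_text})
msgs.append({'role': 'user', 'content': 'What colors stand out the most?'})
answer = model.chat(
    image=image,
    msgs=msgs,
    tokenizer=tokenizer,
    sampling=True,
    temperature=0.7,
)
print(answer)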