llava-Qwen2-7B-Instruct-Chinese-CLIP
Model architecture:
llava-Qwen2-7B-Instruct-Chinese-CLIP = Qwen/Qwen2-7B-Instruct + multi_modal_projector + OFA-Sys/chinese-clip-vit-large-patch14-336px
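As a rough illustration of this composition, the sketch below assembles an analogous model from the two public checkpoints using the transformers LlavaConfig. This is a minimal, hypothetical sketch, not the build script for the released checkpoint: the projector starts untrained here, and the actual training pipeline may wire the weights differently.

from transformers import AutoConfig, LlavaConfig, LlavaForConditionalGeneration

# Vision tower: Chinese-CLIP ViT-L/14 @ 336px (take only its vision sub-config).
vision_config = AutoConfig.from_pretrained(
    "OFA-Sys/chinese-clip-vit-large-patch14-336px"
).vision_config

# Language model: Qwen2-7B-Instruct.
text_config = AutoConfig.from_pretrained("Qwen/Qwen2-7B-Instruct")

# LLaVA glues the two together with a multi_modal_projector, an MLP that maps
# vision features into the LLM embedding space. Note: this instantiates
# randomly initialized weights; pretrained weights for each tower still need
# to be loaded, and the image token index must match the Qwen2 tokenizer.
config = LlavaConfig(vision_config=vision_config, text_config=text_config)
model = LlavaForConditionalGeneration(config)
print(model.multi_modal_projector)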
Fine-tuning
To improve the model's ability to recognize Chinese text, we incorporated three datasets of images from the Chinese-text domain: priyank-m/chinese_text_recognition, SWHL/ChineseOCRBench, and fly0331/ChineseTest. These datasets help compensate for the REILX/llava-Qwen2-7B-Instruct-Chinese-CLIP model's weakness in Chinese text recognition, enabling it to better understand and process Chinese textual information. A sketch of how the three sources could be combined follows below.
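Illustrative only: the split names and column schemas below are assumptions, so check each dataset card and remap columns before training.

from datasets import load_dataset, interleave_datasets

# Assumed "train" splits; the actual split names may differ per dataset.
ocr_sets = [
    load_dataset("priyank-m/chinese_text_recognition", split="train"),
    load_dataset("SWHL/ChineseOCRBench", split="train"),
    load_dataset("fly0331/ChineseTest", split="train"),
]
# Interleave so every training batch mixes samples from all three sources;
# "all_exhausted" oversamples the smaller sets until the largest runs out.
mixed = interleave_datasets(ocr_sets, stopping_strategy="all_exhausted")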
Results
The test results below show that the model can recognize the text embedded in an image and correctly infer the underlying meaning a meme is trying to convey.
Code
Inference code
from transformers import LlavaForConditionalGeneration, AutoProcessor
import torch
from PIL import Image

# Path to the saved full model (replace with your local path).
raw_model_name_or_path = "/path/to/the/saved/model"
model = LlavaForConditionalGeneration.from_pretrained(
    raw_model_name_or_path, device_map="cuda:0", torch_dtype=torch.bfloat16
)
processor = AutoProcessor.from_pretrained(raw_model_name_or_path)
model.eval()

def build_model_input(model, processor):
    # The user prompt (in Chinese) asks the model to act as an insightful
    # interpreter of internet images: describe facial expressions, embedded
    # text, emotions, and background connotations in detail.
    messages = [
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "<image>\n 你是一位有深度的网络图片解读者,擅长解读和描述网络图片。你能洞察图片中的细微之处,对图中的人物面部表情、文字信息、情绪流露和背景寓意具有超强的理解力,描述信息需要详细。"}
    ]
    prompt = processor.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    image = Image.open("01.PNG")
    inputs = processor(text=prompt, images=image, return_tensors="pt",
                       return_token_type_ids=False)
    # Move all input tensors to the model's device.
    for tk in inputs.keys():
        inputs[tk] = inputs[tk].to(model.device)
    generate_ids = model.generate(**inputs, max_new_tokens=200)
    # Strip the prompt tokens, keeping only the newly generated ones.
    generate_ids = [
        oid[len(iids):] for oid, iids in zip(generate_ids, inputs.input_ids)
    ]
    gen_text = processor.batch_decode(
        generate_ids, skip_special_tokens=False,
        clean_up_tokenization_spaces=False
    )[0]
    return gen_text

build_model_input(model, processor)
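Note: because skip_special_tokens=False, the decoded text will still contain the chat template's special tokens (such as the end-of-turn marker); pass skip_special_tokens=True to processor.batch_decode for clean output. Replace raw_model_name_or_path and 01.PNG with your own model path and test image.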