#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright @2023 RhapsodyAI, ModelBest Inc. (modelbest.cn)
#
# @author: bokai xu <[email protected]>
# @date: 2024/07/13
#
import hashlib
import json
import os

import fitz
import gradio as gr
import numpy as np
import spaces
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer
# Note: os.makedirs does not expand '~', so expand the home directory explicitly.
cache_dir = os.path.expanduser('~/.cache/huggingface/datasets')
os.makedirs(cache_dir, exist_ok=True)
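# --- Helpers: content-addressed keys for the on-disk cache ---
# Pages and PDFs are identified by MD5 hashes, so re-uploading the same
# document maps back to the same knowledge-base directory.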
def get_image_md5(img: Image.Image):
    img_byte_array = img.tobytes()
    hash_md5 = hashlib.md5()
    hash_md5.update(img_byte_array)
    hex_digest = hash_md5.hexdigest()
    return hex_digest
def calculate_md5_from_binary(binary_data):
    hash_md5 = hashlib.md5()
    hash_md5.update(binary_data)
    return hash_md5.hexdigest()
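# --- Indexing: render each PDF page to an image and embed it ---
# Every page is rendered at 200 dpi and embedded with the visual embedder;
# the page PNGs, their embeddings (reps.npy), and the page order (md5s.txt)
# are cached under a directory named by the PDF's MD5.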
@spaces.GPU(duration=100)
def add_pdf_gradio(pdf_file_binary, progress=gr.Progress()):
    global model, tokenizer
    model.eval()

    knowledge_base_name = calculate_md5_from_binary(pdf_file_binary)

    this_cache_dir = os.path.join(cache_dir, knowledge_base_name)
    os.makedirs(this_cache_dir, exist_ok=True)

    with open(os.path.join(this_cache_dir, "src.pdf"), 'wb') as file:
        file.write(pdf_file_binary)

    dpi = 200
    doc = fitz.open("pdf", pdf_file_binary)

    reps_list = []
    images = []
    image_md5s = []

    for page in progress.tqdm(doc):
        # with self.lock:  # so that one 16 GB GPU processes only one image at a time
        pix = page.get_pixmap(dpi=dpi)
        image = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
        image_md5 = get_image_md5(image)
        image_md5s.append(image_md5)
        with torch.no_grad():
            reps = model(text=[''], image=[image], tokenizer=tokenizer).reps
        reps_list.append(reps.squeeze(0).cpu().numpy())
        images.append(image)

    for idx in range(len(images)):
        image = images[idx]
        image_md5 = image_md5s[idx]
        cache_image_path = os.path.join(this_cache_dir, f"{image_md5}.png")
        image.save(cache_image_path)

    np.save(os.path.join(this_cache_dir, "reps.npy"), reps_list)

    with open(os.path.join(this_cache_dir, "md5s.txt"), 'w') as f:
        for item in image_md5s:
            f.write(item + '\n')

    return knowledge_base_name
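# --- Retrieval: embed the query and score it against the cached page embeddings ---
# Similarity is a dot product between the query embedding and each page
# embedding; the top-k page images are returned and the query is logged as JSON.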
# @spaces.GPU
def retrieve_gradio(knowledge_base: str, query: str, topk: int):
    global model, tokenizer
    model.eval()

    target_cache_dir = os.path.join(cache_dir, knowledge_base)
    if not os.path.exists(target_cache_dir):
        return None

    md5s = []
    with open(os.path.join(target_cache_dir, "md5s.txt"), 'r') as f:
        for line in f:
            md5s.append(line.rstrip('\n'))

    doc_reps = np.load(os.path.join(target_cache_dir, "reps.npy"))

    # The instruction string is kept verbatim ("relavant" included), since the
    # embedder may expect this exact prompt.
    query_with_instruction = "Represent this query for retrieving relavant document: " + query
    with torch.no_grad():
        query_rep = model(text=[query_with_instruction], image=[None], tokenizer=tokenizer).reps.squeeze(0).cpu()

    query_md5 = hashlib.md5(query.encode()).hexdigest()

    doc_reps_cat = torch.stack([torch.Tensor(i) for i in doc_reps], dim=0)

    similarities = torch.matmul(query_rep, doc_reps_cat.T)

    # gr.Number may deliver a float, so cast before torch.topk.
    topk_values, topk_doc_ids = torch.topk(similarities, k=int(topk))
    topk_doc_ids_np = topk_doc_ids.cpu().numpy()

    images_topk = [Image.open(os.path.join(target_cache_dir, f"{md5s[idx]}.png")) for idx in topk_doc_ids_np]

    with open(os.path.join(target_cache_dir, f"q-{query_md5}.json"), 'w') as f:
        f.write(json.dumps(
            {
                "knowledge_base": knowledge_base,
                "query": query,
                "retrived_docs": [os.path.join(target_cache_dir, f"{md5s[idx]}.png") for idx in topk_doc_ids_np]
            }, indent=4, ensure_ascii=False
        ))

    return images_topk
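# --- User feedback: attach an upvote/downvote to the logged query JSON ---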
def upvote(knowledge_base, query):
    target_cache_dir = os.path.join(cache_dir, knowledge_base)
    query_md5 = hashlib.md5(query.encode()).hexdigest()

    with open(os.path.join(target_cache_dir, f"q-{query_md5}.json"), 'r') as f:
        data = json.loads(f.read())

    data["user_preference"] = "upvote"

    with open(os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"), 'w') as f:
        f.write(json.dumps(data, indent=4, ensure_ascii=False))
    print("up", os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"))

    gr.Info('Received. Thank you!')
    return
def downvote(knowledge_base, query):
    target_cache_dir = os.path.join(cache_dir, knowledge_base)
    query_md5 = hashlib.md5(query.encode()).hexdigest()

    with open(os.path.join(target_cache_dir, f"q-{query_md5}.json"), 'r') as f:
        data = json.loads(f.read())

    data["user_preference"] = "downvote"

    with open(os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"), 'w') as f:
        f.write(json.dumps(data, indent=4, ensure_ascii=False))
    print("down", os.path.join(target_cache_dir, f"q-{query_md5}-withpref.json"))

    gr.Info('Received. Thank you!')
    return
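# --- Model loading: a visual embedder for retrieval, MiniCPM-V-2.6 for generation ---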
device = 'cuda'
print("emb model load begin...")
model_path = 'RhapsodyAI/minicpm-visual-embedding-v0' # replace with your local model path
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
model = AutoModel.from_pretrained(model_path, trust_remote_code=True)
model.eval()
model.to(device)
print("emb model load success!")
print("gen model load begin...")
gen_model_path = 'openbmb/MiniCPM-V-2_6'
gen_tokenizer = AutoTokenizer.from_pretrained(gen_model_path, trust_remote_code=True)
gen_model = AutoModel.from_pretrained(gen_model_path, trust_remote_code=True, attn_implementation='sdpa', torch_dtype=torch.bfloat16)
gen_model.eval()
gen_model.to(device)
print("gen model load success!")
@spaces.GPU(duration=50)
def answer_question(images, question):
    global gen_model, gen_tokenizer
    # Each element of `images` is a tuple of (image_path, None) from the Gallery.
    images_ = [Image.open(image[0]).convert('RGB') for image in images]
    msgs = [{'role': 'user', 'content': [question, *images_]}]
    answer = gen_model.chat(
        image=None,
        msgs=msgs,
        tokenizer=gen_tokenizer
    )
    print(answer)
    return answer
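# --- Gradio UI: upload & index a PDF, retrieve pages, answer, collect feedback ---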
with gr.Blocks() as app:
    gr.Markdown("# MiniCPMV-RAG-PDFQA: Two Vision Language Models Enable End-to-End RAG")

    gr.Markdown("""
- A Vision Language Model Dense Retriever ([minicpm-visual-embedding-v0](https://huggingface.co/RhapsodyAI/minicpm-visual-embedding-v0)) **directly reads** your PDFs **without OCR**, produces **multimodal dense representations**, and builds your personal library.
- **Ask a question**: it retrieves the most relevant pages, then [MiniCPM-V-2.6](https://huggingface.co/spaces/openbmb/MiniCPM-V-2_6) answers your question based on the recalled pages, with strong multi-image understanding capability.
- It helps you read a long **visually-intensive** or **text-oriented** PDF document and find the pages that answer your question.
- It helps you build a personal library and retrieve book pages from a large collection of books.
- It works like a human: read, store, retrieve, and answer with full vision.
""")

    gr.Markdown("- The online demo currently supports PDF documents with fewer than 50 pages due to the GPU time limit. Deploy on your own machine for longer PDFs and books.")

    with gr.Row():
        file_input = gr.File(type="binary", label="Step 1: Upload PDF")
        file_result = gr.Text(label="Knowledge Base ID (remember it, it is re-usable!)")
        process_button = gr.Button("Process PDF (don't click until the PDF upload succeeds)")

    process_button.click(add_pdf_gradio, inputs=[file_input], outputs=file_result)

    with gr.Row():
        kb_id_input = gr.Text(label="Your Knowledge Base ID (paste your Knowledge Base ID here, it is re-usable!)")
        query_input = gr.Text(label="Your Question")
        topk_input = gr.Number(value=5, minimum=1, maximum=10, step=1, label="Number of pages to retrieve")
        retrieve_button = gr.Button("Step 2: Retrieve Pages")

    with gr.Row():
        images_output = gr.Gallery(label="Retrieved Pages")

    retrieve_button.click(retrieve_gradio, inputs=[kb_id_input, query_input, topk_input], outputs=images_output)

    with gr.Row():
        button = gr.Button("Step 3: Answer Question with Retrieved Pages")
        gen_model_response = gr.Textbox(label="MiniCPM-V-2.6's Answer")

    button.click(fn=answer_question, inputs=[images_output, query_input], outputs=gen_model_response)

    with gr.Row():
        downvote_button = gr.Button("🤣Downvote")
        upvote_button = gr.Button("🤗Upvote")

    upvote_button.click(upvote, inputs=[kb_id_input, query_input], outputs=None)
    downvote_button.click(downvote, inputs=[kb_id_input, query_input], outputs=None)

    gr.Markdown("By using this demo, you agree to share your usage data with us for research purposes, to help improve the user experience.")

app.launch()
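# To run locally: `python app.py` (assumes a CUDA GPU and the Hugging Face
# `spaces` package installed, since the GPU decorators above import it).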