import subprocess
import time

import gradio as gr
import spaces
import torch
import torchvision.transforms as T
from PIL import Image
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer

# Install flash-attn at startup; FLASH_ATTENTION_SKIP_CUDA_BUILD skips compiling the CUDA kernels from source.
subprocess.run('pip install flash-attn --no-build-isolation',
               env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)

torch.set_default_device('cuda')

# ImageNet normalization statistics used by the InternViT vision encoder.
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)


def build_transform(input_size):
    """Convert to RGB, resize to a square tile, and normalize with ImageNet statistics."""
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform


def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Pick the tiling grid whose aspect ratio is closest to that of the input image."""
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            # On a tie, prefer the grid with more tiles if the image is large enough to fill it.
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio


def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False):
    """Split the image into between min_num and max_num square tiles of side image_size."""
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate the candidate tiling grids (i columns x j rows)
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1)
        for i in range(1, n + 1) for j in range(1, n + 1)
        if i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the closest aspect ratio to the target
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image, then crop it into tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks
    if use_thumbnail and len(processed_images) != 1:
        # append a low-resolution thumbnail of the whole image as global context
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images


def load_image(image_file, input_size=448, max_num=12):
    """Load an image file and return a stacked tensor of normalized tiles."""
    image = Image.open(image_file).convert('RGB')
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values


model = AutoModel.from_pretrained(
    "5CD-AI/Viet-InternVL2-1B",
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
).eval().cuda()
tokenizer = AutoTokenizer.from_pretrained("5CD-AI/Viet-InternVL2-1B", trust_remote_code=True, use_fast=False)


@spaces.GPU
def chat(message, history):
    print(history)
    print(message)
    # Use the image attached to the current message if there is one;
    # otherwise reuse the image from the first turn of the conversation.
    if len(history) == 0 or len(message["files"]) != 0:
        test_image = message["files"][0]["path"]
    else:
        test_image = history[0][0][0]

    pixel_values = load_image(test_image, max_num=12).to(torch.bfloat16).cuda()
    generation_config = dict(max_new_tokens=1024, do_sample=True, num_beams=3, repetition_penalty=2.5)

    if len(history) == 0:
        question = '<image>\n' + message["text"]
        response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config,
                                            history=None, return_history=True)
    else:
        # Rebuild the (question, answer) history expected by model.chat(), attaching the
        # <image> placeholder either to the first history turn or to the current question.
        conv_history = []
        for chat_pair in history:
            if chat_pair[1] is not None:
                if len(conv_history) == 0 and len(message["files"]) == 0:
                    chat_pair[0] = '<image>\n' + chat_pair[0]
                conv_history.append(tuple(chat_pair))
        print(conv_history)
        if len(message["files"]) != 0:
            question = '<image>\n' + message["text"]
        else:
            question = message["text"]
        response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config,
                                            history=conv_history, return_history=True)

    print(f'User: {question}\nAssistant: {response}')

    # Stream the finished answer back to the UI character by character.
    buffer = ""
    for new_text in response:
        buffer += new_text
        generated_text_without_prompt = buffer[:]
        time.sleep(0.01)
        yield generated_text_without_prompt


CSS = """
@media only screen and (max-width: 600px){
  #component-3 {
    height: 90dvh !important;
    transform-origin: top; /* make sure the element expands from the top down */
    border-style: solid;
    overflow: hidden;
    flex-grow: 1;
    min-width: min(160px, 100%);
    border-width: var(--block-border-width);
  }
}
#component-3 {
  height: 50dvh !important;
  transform-origin: top; /* make sure the element expands from the top down */
  border-style: solid;
  overflow: hidden;
  flex-grow: 1;
  min-width: min(160px, 100%);
  border-width: var(--block-border-width);
}
"""

demo = gr.ChatInterface(
    fn=chat,
    description="""Try [Vintern-1B](https://huggingface.co/5CD-AI/Viet-InternVL2-1B) in this demo. Vintern-1B is a multimodal large language model series, featuring models of various sizes. For each size, we release instruction-tuned models optimized for multimodal tasks. Vintern-1B consists of [InternViT-300M-448px](https://huggingface.co/OpenGVLab/InternViT-300M-448px), an MLP projector, and [Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct).""",
    # Example prompts (in Vietnamese): "Describe the image.", "Extract the information from the image.",
    # "Describe the image in detail."
    examples=[{"text": "Mô tả hình ảnh.", "files": ["./demo_3.jpg"]},
              {"text": "Trích xuất các thông tin từ ảnh.", "files": ["./demo_1.jpg"]},
              {"text": "Mô tả hình ảnh một cách chi tiết.", "files": ["./demo_2.jpg"]}],
    title="❄️ Vintern-1B ❄️",
    multimodal=True,
    css=CSS,
)

demo.queue().launch()
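# --- Minimal single-turn usage sketch (not part of the running app) ----------------------
# The commented snippet below only illustrates how the helpers and model defined above
# could be called directly, e.g. from a notebook, without going through Gradio. The image
# path and the Vietnamese prompt ("Mô tả hình ảnh." = "Describe the image.") are
# placeholders; the generation settings simply mirror the chat() handler above.
#
# pixel_values = load_image("./demo_3.jpg", max_num=12).to(torch.bfloat16).cuda()
# question = '<image>\nMô tả hình ảnh.'  # the <image> placeholder marks where the tiles go
# generation_config = dict(max_new_tokens=1024, do_sample=True, num_beams=3, repetition_penalty=2.5)
# response, conv_history = model.chat(tokenizer, pixel_values, question, generation_config,
#                                     history=None, return_history=True)
# print(response)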