import gradio as gr
import torch
import torchvision.transforms as T
from torchvision.transforms.functional import InterpolationMode
from transformers import AutoModel, AutoTokenizer
from PIL import Image
# Path to the model on the Hugging Face Hub
path = 'h2oai/h2o-mississippi-2b'

# Image preprocessing: standard ImageNet normalization constants
IMAGENET_MEAN = (0.485, 0.456, 0.406)
IMAGENET_STD = (0.229, 0.224, 0.225)
def build_transform(input_size):
    MEAN, STD = IMAGENET_MEAN, IMAGENET_STD
    transform = T.Compose([
        T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=MEAN, std=STD)
    ])
    return transform
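# Illustrative sanity check (not part of the app): the transform maps any
# PIL image to a normalized (3, input_size, input_size) float tensor.
# Image.new is used here only as a stand-in input.
# >>> t = build_transform(448)
# >>> t(Image.new('RGB', (640, 480))).shape
# torch.Size([3, 448, 448])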
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    best_ratio_diff = float('inf')
    best_ratio = (1, 1)
    area = width * height
    for ratio in target_ratios:
        target_aspect_ratio = ratio[0] / ratio[1]
        ratio_diff = abs(aspect_ratio - target_aspect_ratio)
        if ratio_diff < best_ratio_diff:
            best_ratio_diff = ratio_diff
            best_ratio = ratio
        elif ratio_diff == best_ratio_diff:
            # on a tie, prefer the grid with more tiles if the image is large enough
            if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
                best_ratio = ratio
    return best_ratio
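# Worked example (illustrative): an 800x600 image has aspect ratio ~1.33.
# Among the candidate grids below, (3, 2) gives 3/2 = 1.5, the closest
# ratio, so the image would be tiled on a 3x2 grid of 448px tiles.
# >>> ratios = [(1, 1), (2, 1), (1, 2), (2, 2), (3, 2), (2, 3)]
# >>> find_closest_aspect_ratio(800 / 600, ratios, 800, 600, 448)
# (3, 2)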
def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate candidate tile grids (i columns x j rows) with min_num..max_num tiles
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # find the grid whose aspect ratio is closest to the image's
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image, then split it into image_size x image_size tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks

    # optionally append a full-image thumbnail as an extra global view
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images, target_aspect_ratio
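# Illustrative result: continuing the 800x600 example, the (3, 2) grid is
# chosen, so dynamic_preprocess(img, use_thumbnail=True) returns
# 3*2 = 6 crops plus 1 thumbnail (7 PIL images) and (3, 2) as the grid.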
def dynamic_preprocess2(image, min_num=1, max_num=6, image_size=448, use_thumbnail=False, prior_aspect_ratio=None):
    orig_width, orig_height = image.size
    aspect_ratio = orig_width / orig_height

    # enumerate candidate tile grids (i columns x j rows) with min_num..max_num tiles
    target_ratios = set(
        (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if
        i * j <= max_num and i * j >= min_num)
    target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1])

    # keep only grids whose column and row counts do not divide the first-pass
    # grid, so this second pass crops along different tile boundaries
    new_target_ratios = []
    if prior_aspect_ratio is not None:
        for i in target_ratios:
            if prior_aspect_ratio[0] % i[0] != 0 and prior_aspect_ratio[1] % i[1] != 0:
                new_target_ratios.append(i)

    # find the grid whose aspect ratio is closest to the image's
    target_aspect_ratio = find_closest_aspect_ratio(
        aspect_ratio, new_target_ratios, orig_width, orig_height, image_size)

    # calculate the target width and height
    target_width = image_size * target_aspect_ratio[0]
    target_height = image_size * target_aspect_ratio[1]
    blocks = target_aspect_ratio[0] * target_aspect_ratio[1]

    # resize the image, then split it into image_size x image_size tiles
    resized_img = image.resize((target_width, target_height))
    processed_images = []
    for i in range(blocks):
        box = (
            (i % (target_width // image_size)) * image_size,
            (i // (target_width // image_size)) * image_size,
            ((i % (target_width // image_size)) + 1) * image_size,
            ((i // (target_width // image_size)) + 1) * image_size
        )
        split_img = resized_img.crop(box)
        processed_images.append(split_img)
    assert len(processed_images) == blocks

    # optionally append a full-image thumbnail as an extra global view
    if use_thumbnail and len(processed_images) != 1:
        thumbnail_img = image.resize((image_size, image_size))
        processed_images.append(thumbnail_img)
    return processed_images
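# Worked example (illustrative): if the first pass chose prior_aspect_ratio
# (3, 2), then among the grids with 3..6 tiles only (2, 3) survives the
# filter above (3 % 2 != 0 and 2 % 3 != 0), so the second pass re-crops the
# same image on a 2x3 grid, at tile boundaries the first pass never saw.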
def load_image1(image_file, input_size=448, min_num=1, max_num=12):
    if isinstance(image_file, str):
        image = Image.open(image_file).convert('RGB')
    else:
        image = image_file
    transform = build_transform(input_size=input_size)
    images, target_aspect_ratio = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, min_num=min_num, max_num=max_num)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values, target_aspect_ratio
def load_image2(image_file, input_size=448, min_num=1, max_num=12, target_aspect_ratio=None):
    if isinstance(image_file, str):
        image = Image.open(image_file).convert('RGB')
    else:
        image = image_file
    transform = build_transform(input_size=input_size)
    images = dynamic_preprocess2(image, image_size=input_size, use_thumbnail=True, min_num=min_num, max_num=max_num, prior_aspect_ratio=target_aspect_ratio)
    pixel_values = [transform(image) for image in images]
    pixel_values = torch.stack(pixel_values)
    return pixel_values
def load_image_msac(file_name):
    # first pass: crop on the best-fitting grid
    pixel_values, target_aspect_ratio = load_image1(file_name, min_num=1, max_num=6)
    pixel_values = pixel_values.to(torch.bfloat16).cuda()
    # second pass: crop on a complementary grid (multi-scale adaptive cropping)
    pixel_values2 = load_image2(file_name, min_num=3, max_num=6, target_aspect_ratio=target_aspect_ratio)
    pixel_values2 = pixel_values2.to(torch.bfloat16).cuda()
    # concatenate both sets of tiles, keeping a single thumbnail at the end
    pixel_values = torch.cat([pixel_values2[:-1], pixel_values[:-1], pixel_values2[-1:]], 0)
    return pixel_values
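# Shape sketch (illustrative): for an 800x600 input, pass 1 yields a (3, 2)
# grid -> 6 tiles + 1 thumbnail, and pass 2 a (2, 3) grid -> 6 tiles + 1
# thumbnail. Dropping both intermediate thumbnails and re-appending the
# pass-2 thumbnail gives a (13, 3, 448, 448) bfloat16 tensor on the GPU.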
# Load the model and tokenizer
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True
).eval().cuda()

tokenizer = AutoTokenizer.from_pretrained(
    path,
    trust_remote_code=True,
    use_fast=False
)
tokenizer.pad_token = tokenizer.unk_token
tokenizer.eos_token = "<|end|>"
model.generation_config.pad_token_id = tokenizer.pad_token_id
def inference(image, prompt):
    # Check if both image and prompt are provided
    if image is None or prompt.strip() == "":
        return "Please provide both an image and a prompt."

    # Process the image and get pixel_values
    pixel_values = load_image_msac(image)

    # Set generation config
    generation_config = dict(
        num_beams=1,
        max_new_tokens=2048,
        do_sample=False,
    )

    # Generate the response
    response = model.chat(
        tokenizer,
        pixel_values,
        prompt,
        generation_config
    )
    return response
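# Example call (illustrative; 'sample.jpg' is a placeholder path):
# >>> print(inference(Image.open('sample.jpg'), 'Describe this image.'))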
# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("H2O-Mississippi")

    with gr.Row():
        image_input = gr.Image(type="pil", label="Upload an Image")
        prompt_input = gr.Textbox(label="Enter your prompt here")

    response_output = gr.Textbox(label="Model Response")

    with gr.Row():
        submit_button = gr.Button("Submit")
        clear_button = gr.Button("Clear")

    # When the submit button is clicked, call the inference function
    submit_button.click(
        fn=inference,
        inputs=[image_input, prompt_input],
        outputs=response_output
    )

    # Define the clear button action
    def clear_all():
        return None, "", ""

    clear_button.click(
        fn=clear_all,
        inputs=None,
        outputs=[image_input, prompt_input, response_output]
    )

demo.launch(share=True)