import gradio as gr
import cv2
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from models.image_text_transformation import ImageTextTransformation
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--gpt_version', choices=['gpt-3.5-turbo', 'gpt4'], default='gpt-3.5-turbo')
parser.add_argument('--image_caption', action='store_true', dest='image_caption', default=True,
                    help='Set this flag to True if you want to use BLIP2 Image Caption')
parser.add_argument('--dense_caption', action='store_true', dest='dense_caption', default=True,
                    help='Set this flag to True if you want to use Dense Caption')
parser.add_argument('--semantic_segment', action='store_true', dest='semantic_segment', default=False,
                    help='Set this flag to True if you want to use semantic segmentation')
parser.add_argument('--image_caption_device', choices=['cuda', 'cpu'], default='cpu',
                    help='Select the device: cuda or cpu; GPU memory larger than 14GB is recommended')
parser.add_argument('--dense_caption_device', choices=['cuda', 'cpu'], default='cpu',
                    help='Select the device: cuda or cpu; GPUs with less than 6GB of memory are not recommended')
parser.add_argument('--semantic_segment_device', choices=['cuda', 'cpu'], default='cpu',
                    help='Select the device: cuda or cpu; GPU memory larger than 14GB is recommended')
parser.add_argument('--contolnet_device', choices=['cuda', 'cpu'], default='cpu',
                    help='Select the device: cuda or cpu; GPUs with less than 6GB of memory are not recommended')
args = parser.parse_args()

# Route each sub-model to the GPU when CUDA is available; BLIP2 captioning stays on CPU to save GPU memory.
device = "cuda" if torch.cuda.is_available() else "cpu"
# device = "cpu"
if device == "cuda":
    args.image_caption_device = "cpu"
    args.dense_caption_device = "cuda"
    args.semantic_segment_device = "cuda"
    args.contolnet_device = "cuda"
else:
    args.image_caption_device = "cpu"
    args.dense_caption_device = "cpu"
    args.semantic_segment_device = "cpu"
    args.contolnet_device = "cpu"

def pil_image_to_base64(image):
    # Encode a PIL image as a base64 JPEG string so it can be embedded in HTML.
    buffered = BytesIO()
    image.save(buffered, format="JPEG")
    img_str = base64.b64encode(buffered.getvalue()).decode()
    return img_str

def add_logo():
    # Read the project logo and return it as a base64 string.
    with open("examples/logo.png", "rb") as f:
        logo_base64 = base64.b64encode(f.read()).decode()
    return logo_base64

def process_image(image_src, options=None, processor=None):
    print(options)
    if options is None:
        options = []
    # processor.args.semantic_segment = "Semantic Segment" in options
    processor.args.semantic_segment = False
    image_generation_status = "Image Generation" in options
    image_caption, dense_caption, region_semantic, gen_text = processor.image_to_text(image_src)
    if image_generation_status:
        gen_image = processor.text_to_image(gen_text)
        gen_image_str = pil_image_to_base64(gen_image)
    # Combine the outputs into a single HTML output
    custom_output = f'''
    <h2>Image to Text</h2>
    <h3>Image Caption</h3>
    <p>{image_caption}</p>
    <h3>Dense Caption</h3>
    <p>{dense_caption}</p>
    <h3>Region Semantic</h3>
    <p>{region_semantic}</p>
    <h3>Generated Paragraph</h3>
    <p>{gen_text}</p>