import gradio as gr
import torch
import os
from transformers import BlipForConditionalGeneration, BlipProcessor, GenerationConfig

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

_MODEL_PATH = 'IDEA-CCNL/Taiyi-BLIP-750M-Chinese'
HF_TOKEN = os.getenv('HF_TOKEN')

# Load the processor and the fp16 captioning model once at startup.
processor = BlipProcessor.from_pretrained(_MODEL_PATH, use_auth_token=HF_TOKEN)
model = BlipForConditionalGeneration.from_pretrained(
    _MODEL_PATH, use_auth_token=HF_TOKEN).half().eval().to(device)


def inference(raw_image, model_n, question, strategy):
    if model_n == 'Image Captioning':
        # Preprocess the PIL image into pixel values on the model's device/dtype.
        image = processor(images=raw_image, return_tensors="pt").to(device, torch.float16)
        with torch.no_grad():
            if strategy == "Beam search":
                config = GenerationConfig(
                    do_sample=False,
                    num_beams=3,
                    max_length=20,
                    min_length=5,
                )
            else:  # Nucleus sampling
                config = GenerationConfig(
                    do_sample=True,
                    top_p=0.9,
                    max_length=20,
                    min_length=5,
                )
            captions = model.generate(**image, generation_config=config)
            caption = processor.decode(captions[0], skip_special_tokens=True)
            # The tokenizer inserts spaces between Chinese tokens; strip them.
            caption = caption.replace(' ', '')
            return 'caption: ' + caption


inputs = [
    gr.inputs.Image(type='pil'),
    gr.inputs.Radio(choices=['Image Captioning'], type="value",
                    default="Image Captioning", label="Task"),
    gr.inputs.Textbox(lines=2, label="Question"),
    gr.inputs.Radio(choices=['Beam search', 'Nucleus sampling'], type="value",
                    default="Nucleus sampling", label="Caption Decoding Strategy"),
]
outputs = gr.outputs.Textbox(label="Output")

title = "BLIP"
description = ("Gradio demo for BLIP: Bootstrapping Language-Image Pre-training for Unified "
               "Vision-Language Understanding and Generation (Salesforce Research). To use it, "
               "simply upload your image, or click one of the examples to load them. "
               "Read more at the links below.")
article = ""

gr.Interface(inference, inputs, outputs, title=title, description=description,
             article=article,
             examples=[['demo.jpg', "Image Captioning", "None", "Nucleus sampling"]]
             ).launch(enable_queue=True)