import os

import gradio as gr
import torch
from transformers import BlipForConditionalGeneration, BlipProcessor, GenerationConfig

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

_MODEL_PATH = 'IDEA-CCNL/Taiyi-BLIP-750M-Chinese'
HF_TOKEN = os.getenv('HF_TOKEN')

# Load the Taiyi-BLIP Chinese captioning model and its processor once at startup.
processor = BlipProcessor.from_pretrained(_MODEL_PATH, use_auth_token=HF_TOKEN)
model = BlipForConditionalGeneration.from_pretrained(
    _MODEL_PATH, use_auth_token=HF_TOKEN).eval().to(device)
def inference(raw_image, model_n, strategy):
    if model_n == 'Image Captioning':
        model_inputs = processor(raw_image, return_tensors="pt").to(device)
        with torch.no_grad():
            if strategy == "Beam search":
                # Deterministic decoding with beam search.
                config = GenerationConfig(
                    do_sample=False,
                    num_beams=3,
                    max_length=50,
                    min_length=5,
                )
            else:
                # Nucleus (top-p) sampling.
                config = GenerationConfig(
                    do_sample=True,
                    top_p=0.9,
                    max_length=50,
                    min_length=5,
                )
            captions = model.generate(**model_inputs, generation_config=config)
        caption = processor.decode(captions[0], skip_special_tokens=True)
        # The tokenizer emits space-separated Chinese tokens; join them back together.
        caption = caption.replace(' ', '')
        print(caption)
        return 'caption: ' + caption
# The gr.inputs / gr.outputs namespaces were removed in recent Gradio releases;
# use the top-level components instead.
inputs = [
    gr.Image(type='pil'),
    gr.Radio(choices=['Image Captioning'], type="value", value="Image Captioning", label="Task"),
    gr.Radio(choices=['Beam search', 'Nucleus sampling'], type="value", value="Nucleus sampling",
             label="Caption Decoding Strategy"),
]
outputs = gr.Textbox(label="Output")
title = "BLIP" | |
description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation (Salesforce Research). To use it, simply upload your image, or click one of the examples to load them. Read more at the links below." | |
article = "<p style='text-align: center'><a href='https://github.com/IDEA-CCNL/Fengshenbang-LM' target='_blank'>Github Repo</a></p>" | |
# launch(enable_queue=True) is no longer supported; enable the request queue via .queue().
gr.Interface(inference, inputs, outputs, title=title, description=description, article=article,
             examples=[['demo.jpg', "Image Captioning", "Nucleus sampling"]]).queue().launch()
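# Usage sketch (not part of the app): a quick local check of inference() without the Gradio UI.
# It assumes a sample image named 'demo.jpg' next to this script, the same file referenced in
# the Interface examples above; adjust the path if yours differs.
#
#   from PIL import Image
#   img = Image.open('demo.jpg')
#   print(inference(img, 'Image Captioning', 'Beam search'))
#   print(inference(img, 'Image Captioning', 'Nucleus sampling'))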