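# Gradio demo: Chinese image captioning with IDEA-CCNL's Taiyi-BLIP-750M-Chinese model.
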
import gradio as gr
import torch
import os
from transformers import BlipForConditionalGeneration, BlipProcessor, GenerationConfig
# Run on the GPU when available; otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

_MODEL_PATH = 'IDEA-CCNL/Taiyi-BLIP-750M-Chinese'
# Hugging Face token read from the environment, used to access the checkpoint.
HF_TOKEN = os.getenv('HF_TOKEN')

processor = BlipProcessor.from_pretrained(_MODEL_PATH, use_auth_token=HF_TOKEN)
model = BlipForConditionalGeneration.from_pretrained(
    _MODEL_PATH, use_auth_token=HF_TOKEN).eval().to(device)
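
# Caption handler: encodes the image and decodes with the chosen strategy.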
def inference(raw_image, model_n, strategy):
    if model_n == 'Image Captioning':
        model_inputs = processor(raw_image, return_tensors="pt").to(device)
        with torch.no_grad():
            if strategy == "Beam search":
                # Deterministic decoding over a beam of 3 candidates.
                config = GenerationConfig(
                    do_sample=False,
                    num_beams=3,
                    max_length=50,
                    min_length=5,
                )
            else:
                # Nucleus sampling: sample from the top 90% of probability mass.
                config = GenerationConfig(
                    do_sample=True,
                    top_p=0.9,
                    max_length=50,
                    min_length=5,
                )
            captions = model.generate(**model_inputs, generation_config=config)
        caption = processor.decode(captions[0], skip_special_tokens=True)
        # The tokenizer emits space-separated Chinese tokens; join them back together.
        caption = caption.replace(' ', '')
        print(caption)
        return 'caption: ' + caption

inputs = [
    gr.Image(type='pil'),
    gr.Radio(choices=['Image Captioning'], type="value",
             value="Image Captioning", label="Task"),
    gr.Radio(choices=['Beam search', 'Nucleus sampling'], type="value",
             value="Nucleus sampling", label="Caption Decoding Strategy"),
]
outputs = gr.Textbox(label="Output")
title = "BLIP"
description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation (Salesforce Research). To use it, simply upload your image, or click one of the examples to load it. Read more at the link below."
article = "<p style='text-align: center'><a href='https://github.com/IDEA-CCNL/Fengshenbang-LM' target='_blank'>Github Repo</a></p>"
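
# Assemble the interface; queueing serializes incoming requests to the GPU.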
gr.Interface(inference, inputs, outputs, title=title, description=description,
             article=article,
             examples=[['demo.jpg', "Image Captioning", "Nucleus sampling"]]
             ).queue().launch()