import torch
from transformers import AutoProcessor, AutoModelForCausalLM
import gradio as gr

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Load the captioning processor and model once at startup.
processor = AutoProcessor.from_pretrained("alibidaran/General_image_captioning")
model = AutoModelForCausalLM.from_pretrained("alibidaran/General_image_captioning").to(device)

def generate_caption(image, length):
  # Preprocess the PIL image into pixel values on the target device.
  encoded = processor(images=image, return_tensors="pt").to(device)
  pixels = encoded['pixel_values']
  # Generate token ids without tracking gradients, then decode them to text.
  with torch.no_grad():
    generated_ids = model.generate(pixel_values=pixels, max_length=length)
  generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
  return generated_caption

demo = gr.Interface(
    fn=generate_caption,
    inputs=[
        gr.Image(type='pil'),
        gr.Slider(10, 50, value=10, step=1, label="Max caption length"),
    ],
    outputs='text',
    # Each example must supply a value for both inputs: an image path and a max length.
    examples=[
        ["sample.jpg", 30],
        ["sample1.jpg", 30],
        ["sample2.jpg", 30],
        ["sample3.jpg", 30],
        ["sample4.jpg", 30],
    ],
    # flagging_options is an argument of gr.Interface, not of gr.Image.
    flagging_options=["blurry", "incorrect", "other"],
    theme=gr.themes.Soft(primary_hue='purple', secondary_hue=gr.themes.colors.gray),
)

demo.launch(show_error=True)
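
# Optional sanity check (a sketch, not part of the original app): call the
# captioning function directly before wiring it into the UI. The file name
# "test.jpg" below is a placeholder assumption.
# from PIL import Image
# print(generate_caption(Image.open("test.jpg"), 30))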