alibidaran committed
Commit 89c432d
1 Parent(s): 440d1cb

Update app.py

Files changed (1): app.py +3 -4
app.py CHANGED
@@ -4,18 +4,17 @@ import gradio as gr
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
 processor=AutoProcessor.from_pretrained("alibidaran/General_image_captioning")
 model=AutoModelForCausalLM.from_pretrained("alibidaran/General_image_captioning").to(device)
-def generate_caption(image,length):
+def generate_caption(image):
     encoded=processor(images=image, return_tensors="pt").to(device)
     pixels=encoded['pixel_values'].to(device)
     with torch.no_grad():
-        generated_ids=model.generate(pixel_values=pixels,max_length=length)
+        generated_ids=model.generate(pixel_values=pixels,max_length=10)
     generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
     return generated_caption
 demo=gr.Interface(
     fn=generate_caption,
     inputs=[
-        gr.Image(type='pil',flagging_options=["blurry", "incorrect", "other"]),
-        gr.Slider(10,50,value=10)
+        gr.Image(type='pil'),
     ],
     outputs= 'label',
     examples=['sample.jpg','sample1.jpg','sample2.jpg'],
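
For reference, a minimal sketch of app.py as it stands after this commit. The hunk starts at line 4, so the imports on lines 1-3 are not visible; torch, gradio, and the transformers AutoProcessor/AutoModelForCausalLM imports are inferred from usage, and the trailing demo.launch() is an assumption since the diff ends at the examples argument.

```python
# Sketch of app.py after commit 89c432d.
# Imports (diff lines 1-3) and the launch call are assumptions inferred from usage.
import torch
import gradio as gr
from transformers import AutoProcessor, AutoModelForCausalLM

device = 'cuda' if torch.cuda.is_available() else 'cpu'
processor = AutoProcessor.from_pretrained("alibidaran/General_image_captioning")
model = AutoModelForCausalLM.from_pretrained("alibidaran/General_image_captioning").to(device)

def generate_caption(image):
    # Preprocess the PIL image into pixel tensors on the target device.
    encoded = processor(images=image, return_tensors="pt").to(device)
    pixels = encoded['pixel_values'].to(device)
    with torch.no_grad():
        # max_length is now hard-coded to 10; the slider input was removed in this commit.
        generated_ids = model.generate(pixel_values=pixels, max_length=10)
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption

demo = gr.Interface(
    fn=generate_caption,
    inputs=[gr.Image(type='pil')],
    outputs='label',
    examples=['sample.jpg', 'sample1.jpg', 'sample2.jpg'],
)

demo.launch()  # assumed; not shown in the diff hunk
```

The two removals go together: dropping gr.Slider leaves generate_caption with a single image argument, so max_length must be fixed, and flagging_options (which Gradio accepts on gr.Interface rather than on gr.Image) disappears with the rewritten inputs list.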