Mediocreatmybest committed on
Commit
52fd1d4
1 Parent(s): fc57202

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -14
app.py CHANGED
@@ -1,29 +1,27 @@
1
- from PIL import Image
2
- import requests
3
  import torch
4
  import gradio as gr
5
-
6
  from transformers import pipeline
7
 
8
  CAPTION_MODELS = {
9
  'blip-base': 'Salesforce/blip-image-captioning-base',
10
  'blip-large': 'Salesforce/blip-image-captioning-large',
11
  'vit-gpt2-coco-en': 'ydshieh/vit-gpt2-coco-en',
 
12
  }
13
 
14
- captioner = pipeline(task="image-to-text",
15
- model=CAPTION_MODELS['blip-base'],
16
- max_new_tokens=30,
17
- device_map="cpu", use_fast=True
18
- )
19
-
20
  # Simple caption creation
21
- def caption_image(captioner, image_path):
 
 
 
 
 
22
  caption = captioner(image_path)[0]['generated_text']
23
  return str(caption).strip()
24
 
25
- def launch(input):
26
- return caption_image(captioner, input)
27
 
28
- iface = gr.Interface(launch, inputs="text", outputs="text")
29
- iface.launch()
 
 
 
 
1
  import torch
2
  import gradio as gr
 
3
  from transformers import pipeline
4
 
5
# Maps a short, UI-facing model key to its Hugging Face Hub checkpoint id.
# The interface's dropdown choices are built from these keys, so insertion
# order here is the display order.
CAPTION_MODELS = {
    "blip-base": "Salesforce/blip-image-captioning-base",
    "blip-large": "Salesforce/blip-image-captioning-large",
    "vit-gpt2-coco-en": "ydshieh/vit-gpt2-coco-en",
    "blip2-2.7b-fp16": "Mediocreatmybest/blip2-opt-2.7b-fp16-sharded",
}
11
 
 
 
 
 
 
 
12
# Simple caption creation
def caption_image(model_choice, image_path):
    """Generate a caption for one image with the selected model.

    Args:
        model_choice: Key into ``CAPTION_MODELS`` naming the checkpoint to use.
        image_path: Path (or URL) of the image passed to the pipeline.

    Returns:
        The generated caption as a stripped string.
    """
    # Cache constructed pipelines on the function object so each model is
    # loaded at most once per process, instead of reloading the full model
    # weights on every request as the original code did.
    cache = getattr(caption_image, "_pipelines", None)
    if cache is None:
        cache = caption_image._pipelines = {}
    captioner = cache.get(model_choice)
    if captioner is None:
        captioner = cache[model_choice] = pipeline(
            task="image-to-text",
            model=CAPTION_MODELS[model_choice],
            max_new_tokens=30,
            device_map="cpu",
            use_fast=True,
        )
    caption = captioner(image_path)[0]['generated_text']
    return str(caption).strip()
21
 
22
def launch(model_choice, image_path):
    """Gradio handler: caption *image_path* using the dropdown's model choice.

    Gradio calls this positionally with the component values, so renaming the
    second parameter (previously ``input``, which shadowed the builtin) does
    not affect callers.
    """
    return caption_image(model_choice, image_path)
24
 
25
# Dropdown listing the configured captioning models by their short keys.
# NOTE: the gr.inputs.* namespace was deprecated in Gradio 3 and removed in
# Gradio 4 — components now live directly on the gr module.
model_dropdown = gr.Dropdown(choices=list(CAPTION_MODELS.keys()), label='Model Choice')
# Text input carries the image path/URL handed through launch() to the pipeline.
iface = gr.Interface(launch, inputs=[model_dropdown, "text"], outputs="text")
iface.launch()