freddyaboulton (HF staff) committed
Commit d33e5d1
1 Parent(s): d5e3b18
Files changed (1)
  1. app.py +18 -18
app.py CHANGED
@@ -6,17 +6,17 @@ torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/0000000397
 torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
 torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')
 
-git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
-git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
+# git_processor_base = AutoProcessor.from_pretrained("microsoft/git-base-coco")
+# git_model_base = AutoModelForCausalLM.from_pretrained("microsoft/git-base-coco")
 
-git_processor_large = AutoProcessor.from_pretrained("microsoft/git-large-coco")
-git_model_large = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
+# git_processor_large = AutoProcessor.from_pretrained("microsoft/git-large-coco")
+# git_model_large = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")
 
-blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
-blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+# blip_processor_base = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+# blip_model_base = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
 
-blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
-blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+# blip_processor_large = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+# blip_model_large = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
 vitgpt_processor = AutoImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
 vitgpt_model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
@@ -24,10 +24,10 @@ vitgpt_tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-capt
 
 device = "cuda" if torch.cuda.is_available() else "cpu"
 
-git_model_base.to(device)
-blip_model_base.to(device)
-git_model_large.to(device)
-blip_model_large.to(device)
+# git_model_base.to(device)
+# blip_model_base.to(device)
+# git_model_large.to(device)
+# blip_model_large.to(device)
 vitgpt_model.to(device)
 
 def generate_caption(processor, model, image, tokenizer=None):
@@ -44,21 +44,21 @@ def generate_caption(processor, model, image, tokenizer=None):
 
 
 def generate_captions(image):
-    caption_git_base = generate_caption(git_processor_base, git_model_base, image)
+    # caption_git_base = generate_caption(git_processor_base, git_model_base, image)
 
-    caption_git_large = generate_caption(git_processor_large, git_model_large, image)
+    # caption_git_large = generate_caption(git_processor_large, git_model_large, image)
 
-    caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
+    # caption_blip_base = generate_caption(blip_processor_base, blip_model_base, image)
 
-    caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
+    # caption_blip_large = generate_caption(blip_processor_large, blip_model_large, image)
 
     caption_vitgpt = generate_caption(vitgpt_processor, vitgpt_model, image, vitgpt_tokenizer)
 
-    return caption_git_base, caption_git_large, caption_blip_base, caption_blip_large, caption_vitgpt
+    return caption_vitgpt
 
 
 examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
-outputs = [gr.outputs.Textbox(label="Caption generated by GIT-base"), gr.outputs.Textbox(label="Caption generated by GIT-large"), gr.outputs.Textbox(label="Caption generated by BLIP-base"), gr.outputs.Textbox(label="Caption generated by BLIP-large"), gr.outputs.Textbox(label="Caption generated by ViT+GPT-2")]
+outputs = [gr.outputs.Textbox(label="Caption generated by ViT+GPT-2")]
 
 title = "Interactive demo: comparing image captioning models"
 description = "Gradio Demo to compare GIT, BLIP and ViT+GPT2, 3 state-of-the-art vision+language models. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."