aifeifei798 committed on
Commit
704a53e
1 Parent(s): 79ce0f7

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -12
app.py CHANGED
@@ -3,11 +3,11 @@ from transformers import AutoProcessor, AutoModelForCausalLM
3
  import spaces
4
  import re
5
  from PIL import Image
6
-
7
  import subprocess
8
- subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
9
-
10
- model = AutoModelForCausalLM.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True).eval()
11
 
12
  processor = AutoProcessor.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True)
13
 
@@ -84,14 +84,6 @@ with gr.Blocks(css=css) as demo:
84
  with gr.Column():
85
  output_text = gr.Textbox(label="Output Text")
86
 
87
- gr.Examples(
88
- [["image1.jpg"], ["image2.jpg"], ["image3.png"], ["image4.jpg"], ["image5.jpg"], ["image6.PNG"]],
89
- inputs = [input_img],
90
- outputs = [output_text],
91
- fn=run_example,
92
- label='Try captioning on below examples'
93
- )
94
-
95
  submit_btn.click(run_example, [input_img], [output_text])
96
 
97
  demo.launch(debug=True)
 
3
  import spaces
4
  import re
5
  from PIL import Image
6
+ import torch
7
  import subprocess
8
+ #subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
9
+ device = "cuda" if torch.cuda.is_available() else "cpu"
10
+ model = AutoModelForCausalLM.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True).to(device).eval()
11
 
12
  processor = AutoProcessor.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True)
13
 
 
84
  with gr.Column():
85
  output_text = gr.Textbox(label="Output Text")
86
 
 
 
 
 
 
 
 
 
87
  submit_btn.click(run_example, [input_img], [output_text])
88
 
89
  demo.launch(debug=True)