Upload 9 files

- app.py +2 -0
- tagger/fl2sd3longcap.py +4 -2
app.py
CHANGED
@@ -143,6 +143,8 @@ with gr.Blocks(theme="NoCrypt/miku@>=1.2.2", css=css) as demo:
         convert_danbooru_to_e621_prompt, [prompt, v2_tag_type], [prompt], queue=False, show_api=False,
     )
     tagger_generate_from_image.click(
+        lambda: ("", "", ""), None, [v2_series, v2_character, prompt], queue=False,
+    ).success(
         predict_tags_wd,
         [tagger_image, prompt, tagger_algorithms, tagger_general_threshold, tagger_character_threshold],
         [v2_series, v2_character, prompt, v2_copy],
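For context, the app.py change chains Gradio events: the first .click() handler clears the previous series/character/prompt values, and .success() runs the actual tagger only after that clear finishes. A minimal sketch of the same pattern, assuming illustrative component names and a stub prediction function rather than the app's real ones:

import gradio as gr

def predict_tags(image):
    # Stand-in for the real tagger; returns (series, character, prompt).
    return "example_series", "example_character", "1girl, solo"

with gr.Blocks() as demo:
    image = gr.Image(type="pil")
    series = gr.Textbox(label="Series")
    character = gr.Textbox(label="Character")
    prompt = gr.Textbox(label="Prompt")
    run = gr.Button("Generate from image")

    # Clear the outputs first, then run the tagger only if the clear succeeded.
    run.click(
        lambda: ("", "", ""), None, [series, character, prompt], queue=False,
    ).success(
        predict_tags, [image], [series, character, prompt],
    )

demo.launch()

Clearing with queue=False keeps the reset immediate, while the heavier prediction still goes through the normal event queue.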
tagger/fl2sd3longcap.py
CHANGED
@@ -2,11 +2,13 @@ from transformers import AutoProcessor, AutoModelForCausalLM
 import spaces
 import re
 from PIL import Image
+import torch
 
 import subprocess
 subprocess.run('pip install flash-attn --no-build-isolation', env={'FLASH_ATTENTION_SKIP_CUDA_BUILD': "TRUE"}, shell=True)
 
-
+device = "cuda" if torch.cuda.is_available() else "cpu"
+fl_model = AutoModelForCausalLM.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True).to(device).eval()
 fl_processor = AutoProcessor.from_pretrained('gokaygokay/Florence-2-SD3-Captioner', trust_remote_code=True)
 
 
@@ -48,7 +50,7 @@ def fl_run_example(image):
     if image.mode != "RGB":
        image = image.convert("RGB")
 
-    inputs = fl_processor(text=prompt, images=image, return_tensors="pt")
+    inputs = fl_processor(text=prompt, images=image, return_tensors="pt").to(device)
     generated_ids = fl_model.generate(
         input_ids=inputs["input_ids"],
         pixel_values=inputs["pixel_values"],
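The fl2sd3longcap.py change is the standard torch device-selection pattern: pick "cuda" when a GPU is available, fall back to "cpu" otherwise, and move both the model and the processor's tensors to that device before calling generate(). A condensed sketch of the same idea, assuming only the checkpoint name from the diff; the caption() helper, its prompt argument, and max_new_tokens are illustrative, not the Space's actual code:

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

device = "cuda" if torch.cuda.is_available() else "cpu"

model = AutoModelForCausalLM.from_pretrained(
    "gokaygokay/Florence-2-SD3-Captioner", trust_remote_code=True
).to(device).eval()
processor = AutoProcessor.from_pretrained(
    "gokaygokay/Florence-2-SD3-Captioner", trust_remote_code=True
)

def caption(image: Image.Image, prompt: str) -> str:
    if image.mode != "RGB":
        image = image.convert("RGB")
    # The processor returns a BatchFeature; .to(device) moves all its tensors at once.
    inputs = processor(text=prompt, images=image, return_tensors="pt").to(device)
    with torch.inference_mode():
        generated_ids = model.generate(
            input_ids=inputs["input_ids"],
            pixel_values=inputs["pixel_values"],
            max_new_tokens=256,
        )
    return processor.batch_decode(generated_ids, skip_special_tokens=True)[0]

Keeping the inputs on the same device as the model is what the second hunk fixes; without the .to(device), a CUDA-resident model would receive CPU tensors and generate() would fail.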