Update app.py
app.py CHANGED
@@ -1,6 +1,8 @@
 import torch
 import spaces
 from diffusers import StableDiffusionPipeline, DDIMScheduler, AutoencoderKL
+from transformers import AutoFeatureExtractor
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from ip_adapter.ip_adapter_faceid import IPAdapterFaceID, IPAdapterFaceIDPlus
 from huggingface_hub import hf_hub_download
 from insightface.app import FaceAnalysis
@@ -14,6 +16,9 @@ image_encoder_path = "laion/CLIP-ViT-H-14-laion2B-s32B-b79K"
 ip_ckpt = hf_hub_download(repo_id="h94/IP-Adapter-FaceID", filename="ip-adapter-faceid_sd15.bin", repo_type="model")
 ip_plus_ckpt = hf_hub_download(repo_id="h94/IP-Adapter-FaceID", filename="ip-adapter-faceid-plusv2_sd15.bin", repo_type="model")
 
+safety_model_id = "CompVis/stable-diffusion-safety-checker"
+safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id)
+safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id)
 
 device = "cuda"
 
@@ -32,8 +37,13 @@ pipe = StableDiffusionPipeline.from_pretrained(
     torch_dtype=torch.float16,
     scheduler=noise_scheduler,
     vae=vae,
+    feature_extractor=safety_feature_extractor,
+    safety_checker=None
 )
 
+#pipe.load_lora_weights("h94/IP-Adapter-FaceID", weight_name="ip-adapter-faceid-plusv2_sd15_lora.safetensors")
+#pipe.fuse_lora()
+
 ip_model = IPAdapterFaceID(pipe, ip_ckpt, device)
 ip_model_plus = IPAdapterFaceIDPlus(pipe, image_encoder_path, ip_plus_ckpt, device)
 
@@ -88,8 +98,8 @@ css = '''
 h1{margin-bottom: 0 !important}
 '''
 with gr.Blocks(css=css) as demo:
-    gr.Markdown("# IP-Adapter-FaceID demo")
-    gr.Markdown("Demo for the [h94/IP-Adapter-FaceID model](https://huggingface.co/h94/IP-Adapter-FaceID) -
+    gr.Markdown("# IP-Adapter-FaceID Plus demo")
+    gr.Markdown("Demo for the [h94/IP-Adapter-FaceID model](https://huggingface.co/h94/IP-Adapter-FaceID) - Non-commercial license")
     with gr.Row():
         with gr.Column():
             files = gr.Files(
@@ -106,10 +116,10 @@ with gr.Blocks(css=css) as demo:
             style = gr.Radio(label="Generation type", info="For stylized try prompts like 'a watercolor painting of a woman'", choices=["Photorealistic", "Stylized"], value="Photorealistic")
             submit = gr.Button("Submit")
             with gr.Accordion(open=False, label="Advanced Options"):
-                preserve = gr.Checkbox(label="Preserve Face Structure", info="Higher quality, less versatility (the face structure of your first photo will be preserved)", value=True)
+                preserve = gr.Checkbox(label="Preserve Face Structure", info="Higher quality, less versatility (the face structure of your first photo will be preserved). Unchecking this will use the v1 model.", value=True)
                 face_strength = gr.Slider(label="Face Structure strength", info="Only applied if preserve face structure is checked", value=1.3, step=0.1, minimum=0, maximum=3)
                 likeness_strength = gr.Slider(label="Face Embed strength", value=1.0, step=0.1, minimum=0, maximum=5)
-                nfaa_negative_prompts = gr.Textbox(label="Appended Negative Prompts", info="Negative prompts to steer generations towards safe for all audiences outputs", value="
+                nfaa_negative_prompts = gr.Textbox(label="Appended Negative Prompts", info="Negative prompts to steer generations towards safe for all audiences outputs", value="naked, bikini, skimpy, scanty, bare skin, lingerie, swimsuit, exposed, see-through")
         with gr.Column():
             gallery = gr.Gallery(label="Generated Images")
     style.change(fn=change_style,
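Note on the safety-checker wiring: the pipeline receives the new feature_extractor but safety_checker=None, so the StableDiffusionSafetyChecker loaded at module level is presumably run on the generated images by hand somewhere outside these hunks. A minimal sketch of such a manual check, assuming a list of PIL images and the module-level safety_feature_extractor / safety_checker objects; the helper name check_nsfw is illustrative and not taken from app.py:

import numpy as np

def check_nsfw(pil_images):
    # Preprocess the images for the CLIP-based safety model.
    safety_input = safety_feature_extractor(pil_images, return_tensors="pt")
    np_images = [np.array(img) for img in pil_images]
    # StableDiffusionSafetyChecker returns the (possibly blacked-out) images
    # and one boolean flag per image.
    _, has_nsfw = safety_checker(images=np_images, clip_input=safety_input.pixel_values)
    return has_nsfw

Keeping the checker out of the pipeline call leaves it to the app to decide what to do with flagged outputs (drop them, black them out, or show a warning) instead of silently returning blacked-out images.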
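Note on the two adapters: these hunks only construct ip_model (FaceID v1) and ip_model_plus (FaceID Plus v2); the generate calls that the "Preserve Face Structure" checkbox switches between are outside the shown context. A sketch of how the two adapters are typically driven, following the h94/IP-Adapter-FaceID model card; the input photo, prompts, and 512x512 size are placeholders:

import cv2
import torch
from insightface.app import FaceAnalysis
from insightface.utils import face_align

# Identity embedding via insightface, as on the model card.
app = FaceAnalysis(name="buffalo_l", providers=["CPUExecutionProvider"])
app.prepare(ctx_id=0, det_size=(640, 640))
image = cv2.imread("person.jpg")
faces = app.get(image)
faceid_embeds = torch.from_numpy(faces[0].normed_embedding).unsqueeze(0)

prompt = "photo of a woman in a red dress in a garden"
negative_prompt = "blurry, malformed, low quality"

# FaceID v1: conditioned on the identity embedding only.
images = ip_model.generate(
    prompt=prompt, negative_prompt=negative_prompt,
    faceid_embeds=faceid_embeds,
    width=512, height=512, num_inference_steps=30,
)

# FaceID Plus v2: additionally takes an aligned face crop and a structure
# scale; shortcut=True selects the v2 behaviour per the model card.
face_image = face_align.norm_crop(image, landmark=faces[0].kps, image_size=224)
images_plus = ip_model_plus.generate(
    prompt=prompt, negative_prompt=negative_prompt,
    faceid_embeds=faceid_embeds, face_image=face_image,
    shortcut=True, s_scale=1.3,
    width=512, height=512, num_inference_steps=30,
)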