Support only CPU
Signed-off-by: Aisuko <[email protected]>
app.py
CHANGED
@@ -14,16 +14,16 @@ from diffusers import (
     EulerDiscreteScheduler,
 )
 
-controlnet = ControlNetModel.from_pretrained(
-    "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16
-)
+# controlnet = ControlNetModel.from_pretrained(
+#     "DionTimmer/controlnet_qrcode-control_v1p_sd15", torch_dtype=torch.float16
+# )
 
-pipe= StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
-    "runwayml/stable-diffusion-v1-5",
-    controlnet=controlnet,
-    use_safetensors=True,
-    torch_dtype=torch.float16,
-).to("cuda")
+# pipe= StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
+#     "runwayml/stable-diffusion-v1-5",
+#     controlnet=controlnet,
+#     use_safetensors=True,
+#     torch_dtype=torch.float16,
+# ).to("cuda")
 
 
 SAMPLER_MAP={
@@ -36,60 +36,60 @@ SAMPLER_MAP={
 }
 
 
-def inference(
-    qr_code_content: str,
-    prompt: str,
-    negative_prompt: str,
-    guidance_scale: float = 10.0,
-    controlnet_conditioning_scale: float = 2.0,
-    strength: float = 0.8,
-    seed: int = -1,
-    init_image: Image.Image | None = None,
-    qrcode_image: Image.Image | None = None,
-    sampler = "DPM++ Karras SDE",
-):
-    if prompt is None or prompt == "":
-        raise gr.Error("Prompt is required")
+# def inference(
+#     qr_code_content: str,
+#     prompt: str,
+#     negative_prompt: str,
+#     guidance_scale: float = 10.0,
+#     controlnet_conditioning_scale: float = 2.0,
+#     strength: float = 0.8,
+#     seed: int = -1,
+#     init_image: Image.Image | None = None,
+#     qrcode_image: Image.Image | None = None,
+#     sampler = "DPM++ Karras SDE",
+# ):
+#     if prompt is None or prompt == "":
+#         raise gr.Error("Prompt is required")
 
-    if qrcode_image is None and qr_code_content == "":
-        raise gr.Error("QR Code Image or QR Code Content is required")
+#     if qrcode_image is None and qr_code_content == "":
+#         raise gr.Error("QR Code Image or QR Code Content is required")
 
-    pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config)
+#     pipe.scheduler = SAMPLER_MAP[sampler](pipe.scheduler.config)
 
-    generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
+#     generator = torch.manual_seed(seed) if seed != -1 else torch.Generator()
 
-    if qr_code_content != "" or qrcode_image.size == (1, 1):
-        qr = qrcode.QRCode(
-            version=1,
-            error_correction=qrcode.constants.ERROR_CORRECT_H,
-            box_size=10,
-            border=4,
-        )
-        qr.add_data(qr_code_content)
-        qr.make(fit=True)
+#     if qr_code_content != "" or qrcode_image.size == (1, 1):
+#         qr = qrcode.QRCode(
+#             version=1,
+#             error_correction=qrcode.constants.ERROR_CORRECT_H,
+#             box_size=10,
+#             border=4,
+#         )
+#         qr.add_data(qr_code_content)
+#         qr.make(fit=True)
 
-        qrcode_image = qr.make_image(fill_color="black", back_color="white")
-        qrcode_image = qrcode_image.resize((768, 768))
-    else:
-        qrcode_image = qrcode_image.resize((768, 768))
+#         qrcode_image = qr.make_image(fill_color="black", back_color="white")
+#         qrcode_image = qrcode_image.resize((768, 768))
+#     else:
+#         qrcode_image = qrcode_image.resize((768, 768))
 
-    # hack due to gradio examples
-    init_image = qrcode_image
+#     # hack due to gradio examples
+#     init_image = qrcode_image
 
-    out = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        image=init_image,
-        control_image=qrcode_image, # type: ignore
-        width=768, # type: ignore
-        height=768, # type: ignore
-        guidance_scale=float(guidance_scale),
-        controlnet_conditioning_scale=float(controlnet_conditioning_scale), # type: ignore
-        generator=generator,
-        strength=float(strength),
-        num_inference_steps=40,
-    )
-    return out.images[0] # type: ignore
+#     out = pipe(
+#         prompt=prompt,
+#         negative_prompt=negative_prompt,
+#         image=init_image,
+#         control_image=qrcode_image, # type: ignore
+#         width=768, # type: ignore
+#         height=768, # type: ignore
+#         guidance_scale=float(guidance_scale),
+#         controlnet_conditioning_scale=float(controlnet_conditioning_scale), # type: ignore
+#         generator=generator,
+#         strength=float(strength),
+#         num_inference_steps=40,
+#     )
+#     return out.images[0] # type: ignore
 
 def inference_ui_demo():
     return None
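Note: the block commented out above loads the ControlNet weights in float16 and moves the pipeline to CUDA, which a CPU-only Space cannot run. For reference, a minimal CPU-only variant would keep the weights in float32 and leave the pipeline on the default device. The sketch below reuses the model IDs from the commented-out code; everything else is an assumption about how the pipeline could later be re-enabled, not part of this commit.

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetImg2ImgPipeline

# Assumed CPU-only setup (not part of this commit): float32 weights, no .to("cuda").
controlnet = ControlNetModel.from_pretrained(
    "DionTimmer/controlnet_qrcode-control_v1p_sd15",
    torch_dtype=torch.float32,  # fp16 is of little benefit without a GPU
)
pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    use_safetensors=True,
    torch_dtype=torch.float32,
)  # left on the CPU; generation will be much slower than on CUDA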