tombetthauser committed on
Commit 69f4625 • 1 Parent(s): aaff49e

Try moving gradio setup to after controlnet pipe creation

Files changed (1)
  1. app.py +64 -44
app.py CHANGED
@@ -83,7 +83,7 @@ def load_learned_embed_in_clip(learned_embeds_path, text_encoder, tokenizer, tok
 
 
 
-# ----- ControlNet Canny Edges -----------------------------------------------------------------
+# ----- ControlNet Canny Edges Pipe / Setup -----------------------------------------------------------------
 
 import gradio as gr
 from PIL import Image
@@ -103,49 +103,6 @@ controlnet_pipe.scheduler = UniPCMultistepScheduler.from_config(controlnet_pipe.
 controlnet_pipe.enable_model_cpu_offload()
 controlnet_pipe.enable_xformers_memory_efficient_attention()
 
-def controlnet_edges(canny_input_prompt, input_image, input_low_threshold, input_high_threshold, input_invert):
-    np_image = np.array(input_image)
-
-    output_image = input_image
-    numpy_image = np.array(output_image)
-
-    low_threshold = 80
-    high_threshold = 100
-    canny_1 = cv2.Canny(numpy_image, input_low_threshold, input_high_threshold)
-    canny_1 = canny_1[:, :, None]
-    canny_1 = np.concatenate([canny_1, canny_1, canny_1], axis=2)
-    if input_invert:
-        canny_1 = 255 - canny_1
-
-    canny_2 = Image.fromarray(canny_1)
-
-    prompt = canny_input_prompt
-    generator = torch.Generator(device="cpu").manual_seed(2)
-
-    output_image = controlnet_pipe(
-        prompt,
-        canny_2,
-        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
-        generator=generator,
-        num_inference_steps=20,
-    )
-
-    return output_image[0][0]
-
-
-canny_input_prompt = gr.inputs.Textbox(label="Enter a single word or phrase")
-canny_input_image = gr.inputs.Image()
-canny_input_low_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Lower Threshold:", default=100)
-canny_input_high_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Upper Threshold:", default=200)
-canny_input_invert = gr.inputs.Checkbox(label="Invert Image")
-canny_outputs = gr.outputs.Image(type="pil")
-
-# make and launch the gradio app...
-controlnet_canny_interface = gr.Interface(fn=controlnet_edges, inputs=[canny_input_prompt, canny_input_image, canny_input_low_threshold, canny_input_high_threshold, canny_input_invert], outputs=canny_outputs, title='Canny Edge Tracing', allow_flagging='never')
-# controlnet_canny_interface.launch()
-
-
-
 
 
 
@@ -486,6 +443,69 @@ canny_interface = gr.Interface(fn=canny_process_image, inputs=[canny_input_image
 
 
 
+# ----- ControlNet Canny Gradio Setup -----------------------------------------------------------------
+
+# import gradio as gr
+# from PIL import Image
+# import numpy as np
+# import cv2
+
+# from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+# from diffusers import UniPCMultistepScheduler
+# import torch
+
+# controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+# controlnet_pipe = StableDiffusionControlNetPipeline.from_pretrained(
+#     "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+# )
+
+# controlnet_pipe.scheduler = UniPCMultistepScheduler.from_config(controlnet_pipe.scheduler.config)
+# controlnet_pipe.enable_model_cpu_offload()
+# controlnet_pipe.enable_xformers_memory_efficient_attention()
+
+def controlnet_edges(canny_input_prompt, input_image, input_low_threshold, input_high_threshold, input_invert):
+    np_image = np.array(input_image)
+
+    output_image = input_image
+    numpy_image = np.array(output_image)
+
+    low_threshold = 80
+    high_threshold = 100
+    canny_1 = cv2.Canny(numpy_image, input_low_threshold, input_high_threshold)
+    canny_1 = canny_1[:, :, None]
+    canny_1 = np.concatenate([canny_1, canny_1, canny_1], axis=2)
+    if input_invert:
+        canny_1 = 255 - canny_1
+
+    canny_2 = Image.fromarray(canny_1)
+
+    prompt = canny_input_prompt
+    generator = torch.Generator(device="cpu").manual_seed(2)
+
+    output_image = controlnet_pipe(
+        prompt,
+        canny_2,
+        negative_prompt="monochrome, lowres, bad anatomy, worst quality, low quality",
+        generator=generator,
+        num_inference_steps=20,
+    )
+
+    return output_image[0][0]
+
+
+canny_input_prompt = gr.inputs.Textbox(label="Enter a single word or phrase")
+canny_input_image = gr.inputs.Image()
+canny_input_low_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Lower Threshold:", default=100)
+canny_input_high_threshold = gr.inputs.Slider(minimum=0, maximum=1000, step=1, label="Upper Threshold:", default=200)
+canny_input_invert = gr.inputs.Checkbox(label="Invert Image")
+canny_outputs = gr.outputs.Image(type="pil")
+
+# make and launch the gradio app...
+controlnet_canny_interface = gr.Interface(fn=controlnet_edges, inputs=[canny_input_prompt, canny_input_image, canny_input_low_threshold, canny_input_high_threshold, canny_input_invert], outputs=canny_outputs, title='Canny Edge Tracing', allow_flagging='never')
+# controlnet_canny_interface.launch()
+
+
+
 # ----- Launch Tabs -----------------------------------------------------------------
 
 tabbed_interface = gr.TabbedInterface([new_welcome, advanced_tab, beta, canny_interface, controlnet_canny_interface], ["Artbots", "Advanced", "Beta", "Edges", "ControlNet"])
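
A note on the Gradio wiring carried over in this commit: the gr.inputs / gr.outputs component classes used above were deprecated in Gradio 3 and removed in Gradio 4. The following is a minimal sketch, not part of the commit, of how the same ControlNet tab could be declared with the current component API; it assumes the controlnet_edges function and controlnet_pipe created earlier in app.py and leaves the callback unchanged.

# Sketch only: same Canny/ControlNet tab with current Gradio components
# instead of the deprecated gr.inputs / gr.outputs namespaces.
# Assumes controlnet_edges and controlnet_pipe are defined earlier in app.py.
import gradio as gr

canny_input_prompt = gr.Textbox(label="Enter a single word or phrase")
canny_input_image = gr.Image(type="pil")  # PIL image in; the callback converts it with np.array(...)
canny_input_low_threshold = gr.Slider(minimum=0, maximum=1000, step=1, value=100, label="Lower Threshold:")
canny_input_high_threshold = gr.Slider(minimum=0, maximum=1000, step=1, value=200, label="Upper Threshold:")
canny_input_invert = gr.Checkbox(label="Invert Image")
canny_outputs = gr.Image(type="pil")

controlnet_canny_interface = gr.Interface(
    fn=controlnet_edges,
    inputs=[canny_input_prompt, canny_input_image, canny_input_low_threshold,
            canny_input_high_threshold, canny_input_invert],
    outputs=canny_outputs,
    title="Canny Edge Tracing",
    allow_flagging="never",
)

The return output_image[0][0] in controlnet_edges works because diffusers pipeline outputs support integer indexing; output_image.images[0] is the equivalent, more explicit way to get the first generated PIL image.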