Spaces: Running on Zero
Upload dc.py
dc.py CHANGED
@@ -679,14 +679,14 @@ class GuiSD:
         #progress(1, desc="Inference preparation completed. Starting inference...")
 
         info_state = ""  # for yield version
-        return self.infer_short(self.model, pipe_params)
+        return self.infer_short(self.model, pipe_params), info_state
 ## END MOD
 
 def dynamic_gpu_duration(func, duration, *args):
 
     @spaces.GPU(duration=duration)
     def wrapped_func():
-        yield from func(*args)
+        return func(*args)
 
     return wrapped_func()
 
@@ -712,8 +712,8 @@ def sd_gen_generate_pipeline(*args):
     if load_lora_cpu:
         msg_load_lora = "Updating LoRAs in CPU (Slow but saves GPU usage)..."
 
-    if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
-        yield None, msg_load_lora
+    #if lora_list != sd_gen.model.lora_memory and lora_list != [None] * 5:
+    #    yield None, msg_load_lora
 
     # Load lora in CPU
     if load_lora_cpu:
@@ -747,7 +747,7 @@ def sd_gen_generate_pipeline(*args):
 
     start_time = time.time()
 
-    yield from dynamic_gpu_duration(
+    return dynamic_gpu_duration(
         sd_gen.generate_pipeline,
         gpu_duration_arg,
         *generation_args,
@@ -817,7 +817,7 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
     sd_gen.load_new_model(model_name, vae, TASK_MODEL_LIST[0])
     progress(1, desc="Model loaded.")
     progress(0, desc="Starting Inference...")
-    images = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
+    images, info = sd_gen_generate_pipeline(prompt, negative_prompt, 1, num_inference_steps,
                                       guidance_scale, True, generator, lora1, lora1_wt, lora2, lora2_wt, lora3, lora3_wt,
                                       lora4, lora4_wt, lora5, lora5_wt, sampler,
                                       height, width, model_name, vae, TASK_MODEL_LIST[0], None, "Canny", 512, 1024,
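For context on the pattern being edited: dynamic_gpu_duration picks the ZeroGPU time budget per call by decorating an inner closure with @spaces.GPU(duration=duration), which otherwise takes a value fixed at import time. Below is a minimal sketch of how a caller might derive something like gpu_duration_arg from the workload; the estimate_duration helper and its constants (base_overhead, sec_per_step_1mp), as well as the generate_fn/gen_args names, are illustrative assumptions and do not come from dc.py.

import spaces  # ZeroGPU decorator; available on Hugging Face Spaces hardware

def dynamic_gpu_duration(func, duration, *args):
    # Request `duration` seconds of GPU time for this single call, then
    # run func synchronously inside the allocated window (as in the diff).
    @spaces.GPU(duration=duration)
    def wrapped_func():
        return func(*args)

    return wrapped_func()

def estimate_duration(steps: int, height: int, width: int) -> int:
    # Hypothetical sizing heuristic (assumption): fixed startup overhead
    # plus a per-step cost scaled by resolution, capped at a safe maximum.
    base_overhead = 9        # seconds of warmup/load (assumed)
    sec_per_step_1mp = 0.5   # seconds per step at 1 megapixel (assumed)
    megapixels = (height * width) / (1024 * 1024)
    return min(int(base_overhead + steps * sec_per_step_1mp * megapixels) + 1, 120)

# Usage in the style of the diff (generate_fn and gen_args are placeholders):
# images, info = dynamic_gpu_duration(generate_fn, estimate_duration(28, 1024, 1024), *gen_args)

Note that the commit also swaps the generator-style path for plain returns (return func(*args), a tuple return from infer_short, and the commented-out yield of the LoRA status message), which is why infer now unpacks images, info once instead of consuming intermediate yields.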