Spaces: Running on Zero
bugfix
app.py CHANGED
@@ -41,10 +41,10 @@ controlnet = FluxControlNetModel.from_pretrained(controlnet_model, torch_dtype=t
 
 
 pipe = FluxControlNetInpaintPipeline.from_pretrained(base_model, controlnet=controlnet, torch_dtype=torch.bfloat16).to(device)
-torch.backends.cuda.matmul.allow_tf32 = True
-pipe.vae.enable_tiling()
-pipe.vae.enable_slicing()
-pipe.enable_model_cpu_offload() # for saving memory
+# torch.backends.cuda.matmul.allow_tf32 = True
+# pipe.vae.enable_tiling()
+# pipe.vae.enable_slicing()
+# pipe.enable_model_cpu_offload() # for saving memory
 
 control_mode_ids = {
     "scribble_hed": 0,
@@ -175,7 +175,7 @@ def run_flux(
     generator = torch.Generator().manual_seed(seed_slicer)
 
     with calculateDuration("run pipe"):
-        print("start to run pipe")
+        print("start to run pipe", prompt)
         generated_image = pipe(
             prompt=prompt,
             image=image,
@@ -188,7 +188,7 @@ def run_flux(
             strength=strength_slider,
             generator=generator,
             num_inference_steps=num_inference_steps_slider,
-            max_sequence_length=256,
+            # max_sequence_length=256,
             joint_attention_kwargs={"scale": lora_scale}
         ).images[0]
     progress(99, "Generate image success!")
@@ -292,7 +292,7 @@ def process(
 
     control_image = control_image.resize((width, height), Image.LANCZOS)
     control_mode_id = control_mode_ids[control_mode]
-
+    clear_cuda_cache()
     try:
         generated_image = run_flux(
             image=image,