Spaces:
Running
on
Zero
Running
on
Zero
tori29umai
committed on
Commit
•
be583dc
1
Parent(s):
3515ae5
app.py
Browse files
app.py
CHANGED
@@ -49,8 +49,6 @@ def predict(input_image_path, prompt, negative_prompt, controlnet_scale):
|
|
49 |
input_image_pil = Image.open(input_image_path)
|
50 |
base_size = input_image_pil.size
|
51 |
resize_image = resize_image_aspect_ratio(input_image_pil)
|
52 |
-
resize_image_size = resize_image.size
|
53 |
-
width, height = resize_image_size
|
54 |
white_base_pil = base_generation(resize_image.size, (255, 255, 255, 255)).convert("RGB")
|
55 |
generator = torch.manual_seed(0)
|
56 |
last_time = time.time()
|
@@ -67,14 +65,9 @@ def predict(input_image_path, prompt, negative_prompt, controlnet_scale):
|
|
67 |
strength=1.0,
|
68 |
prompt=prompt,
|
69 |
negative_prompt = negative_prompt,
|
70 |
-
|
71 |
-
height=height,
|
72 |
-
controlnet_conditioning_scale=float(controlnet_scale),
|
73 |
-
controlnet_start=0.0,
|
74 |
-
controlnet_end=1.0,
|
75 |
generator=generator,
|
76 |
-
num_inference_steps=
|
77 |
-
guidance_scale=8.5,
|
78 |
eta=1.0,
|
79 |
).images[0]
|
80 |
print(f"Time taken: {time.time() - last_time}")
|
|
|
49 |
input_image_pil = Image.open(input_image_path)
|
50 |
base_size = input_image_pil.size
|
51 |
resize_image = resize_image_aspect_ratio(input_image_pil)
|
|
|
|
|
52 |
white_base_pil = base_generation(resize_image.size, (255, 255, 255, 255)).convert("RGB")
|
53 |
generator = torch.manual_seed(0)
|
54 |
last_time = time.time()
|
|
|
65 |
strength=1.0,
|
66 |
prompt=prompt,
|
67 |
negative_prompt = negative_prompt,
|
68 |
+
controlnet_conditioning_scale=[float(controlnet_scale)],
|
|
|
|
|
|
|
|
|
69 |
generator=generator,
|
70 |
+
num_inference_steps=50,
|
|
|
71 |
eta=1.0,
|
72 |
).images[0]
|
73 |
print(f"Time taken: {time.time() - last_time}")
|