Spaces: Running on Zero
Commit message: update
Browse files
Files changed:
- app.py +2 -2
- flux1_img2img.py +1 -1
- flux1_img2img.py +1 -1
app.py
CHANGED
@@ -86,8 +86,8 @@ with demo_blocks as demo:
|
|
86 |
image_mask = gr.Image(sources=['upload','clipboard'], elem_id="mask_upload", type="pil", label="Mask_Upload",height=400, value=None)
|
87 |
with gr.Accordion(label="Advanced Settings", open=False):
|
88 |
with gr.Row( equal_height=True):
|
89 |
-
strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01, label="…")  [NOTE: old label text truncated in this capture — see the matching "+" line for the new value]
|
90 |
-
seed = gr.Number(value=0, minimum=0, step=1, label="…")  [NOTE: old label text truncated in this capture — see the matching "+" line for the new value]
|
91 |
models = ["black-forest-labs/FLUX.1-schnell"]
|
92 |
inpaint_model = gr.Dropdown(label="modes", choices=models, value="black-forest-labs/FLUX.1-schnell")
|
93 |
id_input=gr.Text(label="Name", visible=False)
|
|
|
86 |
image_mask = gr.Image(sources=['upload','clipboard'], elem_id="mask_upload", type="pil", label="Mask_Upload",height=400, value=None)
|
87 |
with gr.Accordion(label="Advanced Settings", open=False):
|
88 |
with gr.Row( equal_height=True):
|
89 |
+
strength = gr.Number(value=0.75, minimum=0, maximum=1.0, step=0.01, label="strength")
|
90 |
+
seed = gr.Number(value=0, minimum=0, step=1, label="seed")
|
91 |
models = ["black-forest-labs/FLUX.1-schnell"]
|
92 |
inpaint_model = gr.Dropdown(label="modes", choices=models, value="black-forest-labs/FLUX.1-schnell")
|
93 |
id_input=gr.Text(label="Name", visible=False)
|
flux1_img2img.py
CHANGED
@@ -20,7 +20,7 @@ def process_image(image,mask_image,prompt="a person",model_id="black-forest-labs
|
|
20 |
generators.append(generator)
|
21 |
# more parameter see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
|
22 |
output = pipe(prompt=prompt, image=image,generator=generator,strength=strength
|
23 |
-
,guidance_scale=0,num_inference_steps=num_inference_steps)
|
24 |
|
25 |
# TODO support mask
|
26 |
return output.images[0]
|
|
|
20 |
generators.append(generator)
|
21 |
# more parameter see https://huggingface.co/docs/diffusers/api/pipelines/flux#diffusers.FluxInpaintPipeline
|
22 |
output = pipe(prompt=prompt, image=image,generator=generator,strength=strength
|
23 |
+
,guidance_scale=0,num_inference_steps=num_inference_steps,max_sequence_length=512)
|
24 |
|
25 |
# TODO support mask
|
26 |
return output.images[0]
|