# AI Image Creation
#
# A Gradio app that generates images with Stable Diffusion pipelines from
# the diffusers library, with optional SDXL refining and latent upscaling.

import gradio as gr
import torch
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline
import os
import random
import torchsde  # required by the DPMSolverSDEScheduler option below

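# User settings. Unless noted otherwise, flags use 1 for on and 0 for off.
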
# Base directory used for saved images and cached model data when running
# this app locally.
main_dir = "C:/Diffusers"

# Store downloaded model data in a custom cache directory under "main_dir"
# rather than the default Hugging Face cache location.
use_custom_hugging_face_cache_dir = 1

# Name of the folder, inside "main_dir", that holds that cache.
cache_directory_folder_name = "model_data"

# Base model selected when the app loads.
default_base_model = "sdxl"

# Whether to run the model's safety checker on generated images.
use_safety_checker = 0

# Automatically save each image, and a text file with its generation
# information, to the saved images folder.
auto_save_imagery = 1

# Name of the folder, inside "main_dir", where images are saved.
saved_images_folder_name = "saved_images"

# Open the app in a browser tab automatically when it starts.
auto_open_browser = 0

# Show a button on the page that closes the command prompt.
enable_close_command_prompt_button = 0

# When using the refiner with a default configuration, set
# "denoising_end" on the base model so the refiner completes the
# remaining steps.
default_use_denoising_start_in_base_model_when_using_refiner = 1

# Pass the base model output to the refiner in latent space rather than
# as a PIL image.
default_base_model_output_to_refiner_is_in_latent_space = 1

# Log how long each generation takes.
log_generation_times = 1

# Show generated images in a gallery rather than a single image field.
use_image_gallery = 1

# Show a progress log while the image is being created.
show_image_creation_progress_log = 1

# Where status messages appear.
show_messages_in_command_prompt = 1
show_messages_in_modal_on_page = 0

# Suppress the startup notes about the Hugging Face hub being online or
# offline.
suppress_hugging_face_hub_offline_status = 0

# Pass the seeded generator into the pipeline by default.
default_add_seed_into_pipe = 1

# Maximum Gradio queue sizes.
max_queue_size_if_cpu = 1
max_queue_size_if_torch = 20

# Allow configurations that try to match certain tools that were online.
allow_online_configurations = 0

base_model_array = [
    "sdxl",
    "photoreal",
    "sdxl_turbo",
    "sd_1_5_runwayml"
]

base_model_names_object = {
    "sdxl": "Stable Diffusion XL",
    "photoreal": "PhotoReal",
    "sdxl_turbo": "Stable Diffusion XL Turbo",
    "sd_1_5_runwayml": "Stable Diffusion 1.5"
}

base_model_object_of_model_configuration_arrays = {
    "sdxl": [
        "sdxl_default",
        "sdxl_2023-11-12",
        "sdxl_2023-09-05"
    ],
    "photoreal": [
        "photoreal_default",
        "photoreal_3-8-1",
        "photoreal_3-8",
        "photoreal_3-7-5",
        "photoreal_3-6",
        "photoreal_2023-11-12",
        "photoreal_2023-09-01"
    ],
    "sdxl_turbo": [
        "sdxl_turbo_default"
    ],
    "sd_1_5_runwayml": [
        "sd_1_5_runwayml_default"
    ]
}

model_configuration_names_object = {
    "sdxl_default": "1.0 - Default",
    "sdxl_2023-11-12": "1.0 (2023-11-12 online config)",
    "sdxl_2023-09-05": "1.0 (2023-09-05 online config)",
    "photoreal_default": "3.6 - Default",
    "photoreal_3-8-1": "3.8.1 - Default",
    "photoreal_3-8": "3.8 - Default",
    "photoreal_3-7-5": "3.7.5 - Default",
    "photoreal_3-6": "3.6 - Default",
    "photoreal_2023-11-12": "3.7.5 (2023-11-12 online config)",
    "photoreal_2023-09-01": "3.6 (2023-09-01 online config)",
    "sdxl_turbo_default": "Default",
    "sd_1_5_runwayml_default": "1.5 - Default"
}

model_configuration_links_object = {
    "sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0",
    "sdxl_2023-11-12": "stabilityai/stable-diffusion-xl-base-1.0",
    "sdxl_2023-09-05": "stabilityai/stable-diffusion-xl-base-1.0",
    "photoreal_default": "circulus/canvers-realistic-v3.6",
    "photoreal_3-8-1": "circulus/canvers-real-v3.8.1",
    "photoreal_3-8": "circulus/canvers-real-v3.8",
    "photoreal_3-7-5": "circulus/canvers-real-v3.7.5",
    "photoreal_3-6": "circulus/canvers-realistic-v3.6",
    "photoreal_2023-11-12": "circulus/canvers-real-v3.7.5",
    "photoreal_2023-09-01": "circulus/canvers-realistic-v3.6",
    "sdxl_turbo_default": "stabilityai/sdxl-turbo",
    "sd_1_5_runwayml_default": "runwayml/stable-diffusion-v1-5"
}

# Configurations that always require the refiner.
model_configuration_force_refiner_object = {
    "sdxl_2023-11-12": 1,
    "sdxl_2023-09-05": 1
}

# Note: this is an alias, not a copy; both names refer to the same dict.
model_configuration_include_refiner_number_of_steps_object = model_configuration_force_refiner_object

# Configurations that try to match versions of tools that were online.
online_configurations_object = {
    "sdxl_2023-11-12": 1,
    "sdxl_2023-09-05": 1,
    "photoreal_2023-11-12": 1,
    "photoreal_2023-09-01": 1
}

hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0"
hugging_face_upscaler_partial_path = "stabilityai/sd-x2-latent-upscaler"

# The configuration selected by default for each base model.
base_model_model_configuration_defaults_object = {
    "sdxl": "sdxl_default",
    "photoreal": "photoreal_default",
    "sdxl_turbo": "sdxl_turbo_default",
    "sd_1_5_runwayml": "sd_1_5_runwayml_default"
}

default_scheduler = "model_default"

schedulers_array = [
    "model_default",
    "ddim",
    "ddpm",
    "dpm_solver_multistep",
    "dpm_solver_multistep_karras_sigmas_true",
    "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp",
    "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp",
    "dpm_solver_singlestep",
    "dpm_solver_singlestep_karras_sigmas_true",
    "kdpm2_discrete",
    "kdpm2_discrete_karras_sigmas_true",
    "kdpm2_ancestral_discrete",
    "kdpm2_ancestral_discrete_karras_sigmas_true",
    "euler_discrete",
    "euler_ancestral_discrete",
    "heun_discrete",
    "lms_discrete",
    "lms_discrete_karras_sigmas_true",
    "pndm",
    "pndm_skip_prk_steps_true",
    "deis_multistep",
    "dpm_solver_sde",
    "uni_pc_multistep"
]

scheduler_long_names_object = {
    "model_default": "Model Default",
    "ddim": "DDIM",
    "ddpm": "DDPM",
    "dpm_solver_multistep": "DPM++ 2M (DPMSolverMultistep)",
    "dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras (DPMSolverMultistep with use_karras_sigmas=True)",
    "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE (DPMSolverMultistep with algorithm_type=\"sde-dpmsolver++\")",
    "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras (DPMSolverMultistep with use_karras_sigmas=True & algorithm_type=\"sde-dpmsolver++\")",
    "dpm_solver_singlestep": "DPM++ SDE (DPMSolverSinglestep)",
    "dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras (DPMSolverSinglestep with use_karras_sigmas=True)",
    "kdpm2_discrete": "DPM2 (KDPM2Discrete)",
    "kdpm2_discrete_karras_sigmas_true": "DPM2 Karras (KDPM2Discrete with use_karras_sigmas=True)",
    "kdpm2_ancestral_discrete": "DPM2 a (KDPM2AncestralDiscrete)",
    "kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras (KDPM2AncestralDiscrete with use_karras_sigmas=True)",
    "euler_discrete": "Euler (EulerDiscrete)",
    "euler_ancestral_discrete": "Euler a (EulerAncestralDiscrete)",
    "heun_discrete": "Heun (HeunDiscrete)",
    "lms_discrete": "LMS (LMSDiscrete)",
    "lms_discrete_karras_sigmas_true": "LMS Karras (LMSDiscrete with use_karras_sigmas=True)",
    "pndm": "PNDM",
    "pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS",
    "deis_multistep": "DEISMultistep",
    "dpm_solver_sde": "DPMSolverSDE",
    "uni_pc_multistep": "UniPCMultistep"
}

scheduler_short_names_object = {
    "ddim": "DDIM",
    "ddpm": "DDPM",
    "dpm_solver_multistep": "DPM++ 2M",
    "dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras",
    "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE",
    "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras",
    "dpm_solver_singlestep": "DPM++ SDE",
    "dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras",
    "kdpm2_discrete": "DPM2",
    "kdpm2_discrete_karras_sigmas_true": "DPM2 Karras",
    "kdpm2_ancestral_discrete": "DPM2 a",
    "kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras",
    "euler_discrete": "Euler",
    "euler_ancestral_discrete": "Euler a",
    "heun_discrete": "Heun",
    "lms_discrete": "LMS",
    "lms_discrete_karras_sigmas_true": "LMS Karras",
    "pndm": "PNDM",
    "pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS",
    "deis_multistep": "DEISMultistep",
    "dpm_solver_sde": "DPMSolverSDE",
    "uni_pc_multistep": "UniPCMultistep"
}

scheduler_name_to_identifier_in_app_object = {
    "DDIMScheduler": "ddim",
    "DDPMScheduler": "ddpm",
    "DPMSolverMultistepScheduler": "dpm_solver_multistep",
    "DPMSolverSinglestepScheduler": "dpm_solver_singlestep",
    "KDPM2DiscreteScheduler": "kdpm2_discrete",
    "KDPM2AncestralDiscreteScheduler": "kdpm2_ancestral_discrete",
    "EulerDiscreteScheduler": "euler_discrete",
    "EulerAncestralDiscreteScheduler": "euler_ancestral_discrete",
    "HeunDiscreteScheduler": "heun_discrete",
    "LMSDiscreteScheduler": "lms_discrete",
    "PNDMScheduler": "pndm",
    "DEISMultistepScheduler": "deis_multistep",
    "DPMSolverSDEScheduler": "dpm_solver_sde",
    "UniPCMultistepScheduler": "uni_pc_multistep"
}

device = "cpu"

if torch.cuda.is_available():

    device = "cuda"

    # This must be set as an environment variable before CUDA memory is
    # allocated for the allocator to honor it.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:8000"

    torch.cuda.max_memory_allocated(
        device = device
    )
    torch.cuda.empty_cache()

if device == "cpu":

    default_base_model = "sdxl_turbo"

default_prompt = ""
default_negative_prompt = ""

default_width = 768
default_height = 768

default_guidance_scale_value = 7.5

default_base_model_base_model_num_inference_steps = 50
default_base_model_base_model_num_inference_steps_for_sdxl_turbo = 2

default_seed_maximum = 1000000000000000000
default_seed_value = 876678173805928800

enable_refiner = 1
enable_upscaler = 1

default_refiner_selected = 0
default_upscaler_selected = 0

default_refiner_accordion_open = 1
default_upscaler_accordion_open = 1

maximum_upscaler_steps = 150
default_upscaler_steps = 50

use_xformers = 1

use_default_attn_processor = 0

display_xformers_usage_in_prompt_info = 1
include_transformers_version_in_prompt_info = 1
display_default_attn_processor_usage_in_prompt_info = 1

# Sequential CPU offload and model CPU offload are mutually exclusive;
# if both are turned on for the same pipeline, sequential offload is
# disabled further below.
use_sequential_cpu_offload_for_base_model = 1
use_sequential_cpu_offload_for_refiner = 1
use_sequential_cpu_offload_for_upscaler = 1

use_model_cpu_offload_for_base_model = 0
use_model_cpu_offload_for_refiner = 0
use_model_cpu_offload_for_upscaler = 0

# Per-model defaults. (Every branch currently uses a seed of 0, which
# means a random seed is chosen at generation time.)
if default_base_model == "photoreal":

    default_seed_value = 0

elif default_base_model == "sdxl_turbo":

    default_seed_value = 0

elif default_base_model == "sd_1_5_runwayml":

    default_seed_value = 0

else:

    # "sdxl"
    default_width = 1024
    default_height = 1024

    default_guidance_scale_value = 7.5

    default_seed_value = 0

width_and_height_input_slider_steps = 8

opening_html = ""
ending_html = ""

max_queue_size = max_queue_size_if_torch

if device == "cpu":

    opening_html = "<span style=\"font-weight: bold; color: #c00;\">THIS APP IS EXCEPTIONALLY SLOW!</span><br/>This app is not running on a GPU. The first time it loads after the space is rebuilt, it might take 10 minutes to generate an SDXL Turbo image. After that point, it may take 2 to 3 minutes to complete two steps. For other models, it may take hours to create a single image."

    max_queue_size = max_queue_size_if_cpu

if allow_online_configurations == 1:

    ending_html = """This app allows you to try to match images that can be generated using several tools online. (<a href=\"https://huggingface.co/spaces/Manjushri/SDXL-1.0\" target=\"_blank\">Stable Diffusion XL</a>, <a href=\"https://huggingface.co/spaces/Manjushri/PhotoReal-V3.7.5\" target=\"_blank\">PhotoReal with SDXL 1.0 Refiner</a> and <a href=\"https://huggingface.co/spaces/diffusers/unofficial-SDXL-Turbo-i2i-t2i\" target=\"_blank\">SDXL Turbo Unofficial Demo</a>) You can select the base model you want to use in the first dropdown option. The second option selects which version and/or configuration to use. Certain configurations try to match the versions that were online, taking into account changes that were made over time. Each base model also has a default configuration I chose; it is subject to change while I am still designing this app.

"""

ending_html += """Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions.

The original script for this app was written by <a href=\"https://huggingface.co/Manjushri\" target=\"_blank\">Manjushri</a>."""

refiner_on_text = "Refiner is on. "
refiner_off_text = "Refiner is off. "

upscaler_on_text = "Upscaler is on. "
upscaler_off_text = "Upscaler is off. "

number_of_reserved_tokens = 2

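# Prompt lengths are measured in tokenizer tokens, not characters. Two
# tokens are reserved for the start and end markers the tokenizer adds,
# which is what "number_of_reserved_tokens" above accounts for.
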
# Detect whether this script is running in the magicfixeseverything
# Hugging Face space. (os.uname is unavailable on Windows, so any
# failure here means a local run.)
script_being_run_on_hugging_face = 0

try:
    if str(os.uname()).find("magicfixeseverything") >= 0:
        script_being_run_on_hugging_face = 1
except:
    script_being_run_on_hugging_face = 0

if script_being_run_on_hugging_face == 1:

    use_custom_hugging_face_cache_dir = 0
    auto_save_imagery = 0
    show_messages_in_modal_on_page = 0
    show_messages_in_command_prompt = 1

    allow_online_configurations = 0

    if device == "cpu":

        show_image_creation_progress_log = 1

        default_width = 768
        default_height = 768

        ending_html = """
If you would like to download this app to run offline on a Windows computer that has an NVIDIA graphics card, click <a href=\"https://huggingface.co/spaces/magicfixeseverything/ai_image_creation/resolve/main/ai_image_creation.zip\">here</a> to download it.

""" + ending_html

    default_width = 768
    default_height = 768

if allow_online_configurations == 0:

    base_model_array = [
        "sdxl",
        "photoreal",
        "sdxl_turbo"
    ]

    base_model_object_of_model_configuration_arrays = {
        "sdxl": [
            "sdxl_default"
        ],
        "photoreal": [
            "photoreal_default"
        ],
        "sdxl_turbo": [
            "sdxl_turbo_default"
        ]
    }

    base_model_model_configuration_defaults_object = {
        "sdxl": "sdxl_default",
        "photoreal": "photoreal_default",
        "sdxl_turbo": "sdxl_turbo_default"
    }

hugging_face_hub_is_offline = 0

if script_being_run_on_hugging_face == 0:

    if (
        ("HF_HUB_OFFLINE" in os.environ) and
        (int(os.environ["HF_HUB_OFFLINE"]) == 1)
    ):

        hugging_face_hub_is_offline = 1

    if suppress_hugging_face_hub_offline_status == 1:

        if hugging_face_hub_is_offline == 0:

            print ("Note: The Hugging Face cache directory does not automatically delete older data. Over time, it could eventually grow to use all the space on the drive it is on. You either need to manually clean out the folder occasionally or see Instructions.txt on how to not automatically update data once you have downloaded everything you need.")

        else:

            print ("You are working offline. Data will not be downloaded. See \"ai_image_creation.bat\" or \"Instructions.txt\" for more info.")

if device == "cuda":

    # This must be set as an environment variable before CUDA memory is
    # allocated for the allocator to honor it.
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:8000"

    torch.cuda.max_memory_allocated(
        device = device
    )
    torch.cuda.empty_cache()

saved_images_dir = main_dir + "/" + saved_images_folder_name

hugging_face_cache_dir = main_dir + "/" + cache_directory_folder_name

if not os.path.exists(hugging_face_cache_dir):
    os.makedirs(hugging_face_cache_dir)

if auto_save_imagery == 1:

    from datetime import datetime
    import time

if (
    (log_generation_times == 1) or
    (show_image_creation_progress_log == 1)
):

    import time

if device == "cpu":

    # Offloading and xFormers only apply when a GPU is in use.
    use_sequential_cpu_offload_for_base_model = 0
    use_sequential_cpu_offload_for_refiner = 0
    use_sequential_cpu_offload_for_upscaler = 0

    use_model_cpu_offload_for_base_model = 0
    use_model_cpu_offload_for_refiner = 0
    use_model_cpu_offload_for_upscaler = 0

    use_xformers = 0

# If both offload styles are enabled for the same pipeline, prefer model
# CPU offload and turn off sequential offload.
if (
    (use_sequential_cpu_offload_for_base_model == 1) and
    (use_model_cpu_offload_for_base_model == 1)
):

    use_sequential_cpu_offload_for_base_model = 0

if (
    (use_sequential_cpu_offload_for_refiner == 1) and
    (use_model_cpu_offload_for_refiner == 1)
):

    use_sequential_cpu_offload_for_refiner = 0

if (
    (use_sequential_cpu_offload_for_upscaler == 1) and
    (use_model_cpu_offload_for_upscaler == 1)
):

    use_sequential_cpu_offload_for_upscaler = 0

def error_function(
    text_message
):

    print (text_message)

    # gr.Error only displays when raised inside an event handler; at
    # this point in the script, the print above and the exit below are
    # what take effect.
    gr.Error(text_message)

    exit(1)

additional_prompt_info_html = ""

if auto_save_imagery == 1:

    additional_prompt_info_html = " The image, and a text file with generation information, will be saved automatically."

if use_xformers == 1:

    from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

if use_default_attn_processor == 1:

    from diffusers.models.attention_processor import AttnProcessor

if (
    default_base_model and
    (default_base_model in base_model_object_of_model_configuration_arrays) and
    (default_base_model in base_model_model_configuration_defaults_object)
):

    default_model_configuration = base_model_model_configuration_defaults_object[default_base_model]

    if default_model_configuration in model_configuration_names_object:

        default_model_configuration_choices_array = []

        for this_model_configuration in base_model_object_of_model_configuration_arrays[default_base_model]:

            if model_configuration_names_object[this_model_configuration]:

                default_model_configuration_choices_array.append(
                    model_configuration_names_object[this_model_configuration]
                )

            else:

                error_function("A default configuration must be properly named in the code.")

    else:

        error_function("A default configuration must be properly configured in the code.")

else:

    error_function("A default base model must be properly configured in the code.")

default_base_model_nicely_named_value = base_model_names_object[default_base_model]

default_model_configuration_nicely_named_value = model_configuration_names_object[default_model_configuration]

if not (
    default_scheduler and
    default_scheduler in scheduler_long_names_object
):

    error_function("A default scheduler must be properly configured in the code.")

default_scheduler_nicely_named_value = scheduler_long_names_object[default_scheduler]

if enable_refiner != 1:

    default_refiner_selected = 0

if enable_upscaler != 1:

    default_upscaler_selected = 0

model_configuration_requires_refiner = 0

if default_model_configuration in model_configuration_force_refiner_object:

    model_configuration_requires_refiner = model_configuration_force_refiner_object[default_model_configuration]

if model_configuration_requires_refiner == 1:

    enable_refiner = 1
    default_refiner_selected = 1

default_refine_option = "No"

if default_refiner_selected == 1:

    default_refine_option = "Yes"

default_upscale_option = "No"

if default_upscaler_selected == 1:

    default_upscale_option = "Yes"

# 1 for a default configuration, 0 for an online one.
is_default_config = 1

if default_model_configuration in online_configurations_object:

    is_default_config = 0

default_refiner_and_upscaler_status_text = ""

default_use_denoising_start_in_base_model_when_using_refiner_is_selected = False

if default_use_denoising_start_in_base_model_when_using_refiner == 1:

    default_use_denoising_start_in_base_model_when_using_refiner_is_selected = True

default_base_model_output_to_refiner_is_in_latent_space_is_selected = False

if default_base_model_output_to_refiner_is_in_latent_space == 1:

    default_base_model_output_to_refiner_is_in_latent_space_is_selected = True

refiner_default_config_accordion_visible = True

if (
    (enable_refiner != 1) or
    (is_default_config != 1)
):

    refiner_default_config_accordion_visible = False

refiner_default_config_accordion_open = False

if (
    (default_refiner_accordion_open == 1) or
    (
        (is_default_config == 1) and
        (default_refiner_selected == 1)
    )
):

    refiner_default_config_accordion_open = True

refiner_online_config_accordion_visible = True

if (
    (enable_refiner != 1) or
    (is_default_config == 1)
):

    refiner_online_config_accordion_visible = False

refiner_online_config_accordion_open = False

if (
    (default_refiner_accordion_open == 1) or
    (
        (is_default_config != 1) and
        (default_refiner_selected == 1)
    )
):

    refiner_online_config_accordion_open = True

refiner_group_visible = False

if enable_refiner == 1:

    refiner_group_visible = True

    if default_refiner_selected == 1:

        default_refiner_and_upscaler_status_text += refiner_on_text

    else:

        default_refiner_and_upscaler_status_text += refiner_off_text

upscaler_accordion_open = False

if (
    (default_upscaler_selected == 1) or
    (default_upscaler_accordion_open == 1)
):

    upscaler_accordion_open = True

upscaler_group_visible = False

if enable_upscaler == 1:

    upscaler_group_visible = True

    if default_upscaler_selected == 1:

        default_refiner_and_upscaler_status_text += upscaler_on_text

    else:

        default_refiner_and_upscaler_status_text += upscaler_off_text

# SDXL Turbo does not use a negative prompt or guidance scale, so it
# gets its own versions of those fields.
default_negative_prompt_field_row_visibility = True
default_negative_prompt_for_sdxl_turbo_field_row_visibility = False
default_base_model_num_inference_steps_field_row_visibility = True
default_base_model_num_inference_steps_field_for_sdxl_turbo_field_row_visibility = False
default_guidance_scale_field_row_visibility = True
default_guidance_scale_for_sdxl_turbo_field_row_visibility = False

if default_base_model == "sdxl_turbo":

    default_negative_prompt_field_row_visibility = False
    default_negative_prompt_for_sdxl_turbo_field_row_visibility = True
    default_base_model_num_inference_steps_field_row_visibility = False
    default_base_model_num_inference_steps_field_for_sdxl_turbo_field_row_visibility = True
    default_guidance_scale_field_row_visibility = False
    default_guidance_scale_for_sdxl_turbo_field_row_visibility = True

default_add_seed_into_pipe_field_row_visibility = False

if is_default_config == 1:

    default_add_seed_into_pipe_field_row_visibility = True

default_add_seed_into_pipe_is_selected = False

if default_add_seed_into_pipe == 1:

    default_add_seed_into_pipe_is_selected = True

default_base_model_choices_array = []

stored_model_configuration_names_object = {}

for this_base_model in base_model_array:

    default_base_model_choices_array.append(
        base_model_names_object[this_base_model]
    )

    stored_model_configuration = base_model_model_configuration_defaults_object[this_base_model]

    stored_model_configuration_names_object[this_base_model] = model_configuration_names_object[stored_model_configuration]

default_scheduler_choices_array = []

for this_scheduler in schedulers_array:

    default_scheduler_choices_array.append(
        scheduler_long_names_object[this_scheduler]
    )

# 9007199254740992 is 2**53, the largest integer a JavaScript number
# (and therefore a browser-side number field) represents exactly. For
# larger maximums, collect the seed through a textbox instead.
make_seed_selection_a_textbox = 1

if default_seed_maximum <= 9007199254740992:

    make_seed_selection_a_textbox = 0

def show_message(
    message_to_display
):

    if show_messages_in_command_prompt == 1:

        print (message_to_display)

    if show_messages_in_modal_on_page == 1:

        gr.Info(message_to_display)

def convert_seconds(
    seconds
):

    # Split a number of seconds into hours, minutes and seconds.
    hours = seconds // 3600
    minutes = (seconds % 3600) // 60
    seconds = seconds % 60
    return hours, minutes, seconds

def seed_not_valid(seed_num_str):

    # A seed is valid when it is an integer between 1 and
    # default_seed_maximum.
    try:
        seed_num = int(seed_num_str)
        if (seed_num > 0) and (seed_num <= default_seed_maximum):
            return False
        else:
            return True
    except ValueError:
        return True

def numerical_bool(
    original_value
):

    # Normalize the mixed values Gradio fields can return ("Yes",
    # "True", True or 1) to 1, and everything else to 0.
    new_value = 0

    if (
        (original_value == 1) or
        (original_value == "Yes") or
        (original_value == "True") or
        (original_value == True)
    ):

        new_value = 1

    return new_value

def truncate_prompt (
    pipe,
    existing_prompt_text
):

    # Shorten a prompt, word by word, until it fits within the model's
    # token limit. Returns the text that was kept and the text that was
    # not used.
    tokenizer = pipe.tokenizer

    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens

    prompt_text_words_array = existing_prompt_text.split(" ")

    prompt_text_words_array_length = len(prompt_text_words_array)

    prompt_text_words_index = 0

    prompt_text_substring = ""
    prompt_text_not_used_substring = ""

    for prompt_text_word in prompt_text_words_array:

        prompt_text_words_index += 1

        substring_to_test = prompt_text_substring

        if prompt_text_words_index > 1:

            substring_to_test += " "

        substring_to_test += prompt_text_word

        token_length_of_substring_to_test = len(tokenizer.tokenize(substring_to_test))

        if token_length_of_substring_to_test > max_token_length_of_model:

            prompt_text_not_used_substring += prompt_text_word + " "

        else:

            prompt_text_substring = substring_to_test

    return (
        prompt_text_substring,
        prompt_text_not_used_substring
    )

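# truncate_prompt keeps whole words: it re-tokenizes the prompt one word
# at a time and stops adding words once the token limit would be
# exceeded, returning both the kept and the discarded portions.
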
def construct_pipe (
    base_model_name_value,
    model_configuration_name_value
):

    if device == "cuda":
        torch.cuda.empty_cache()

    base_model_kwargs = {}

    if (
        (base_model_name_value == "sdxl") or
        (base_model_name_value == "photoreal") or
        (base_model_name_value == "sdxl_turbo") or
        (base_model_name_value == "sd_1_5_runwayml")
    ):

        base_model_kwargs["use_safetensors"] = True

    if use_safety_checker == 0:

        if (
            (base_model_name_value == "photoreal") or
            (base_model_name_value == "sd_1_5_runwayml")
        ):

            # Note: this replaces the kwargs built above, dropping
            # "use_safetensors" for these two models.
            base_model_kwargs = {
                "safety_checker": None,
                "requires_safety_checker": False
            }

    if device == "cuda":

        if (
            (base_model_name_value == "sdxl") or
            (base_model_name_value == "sdxl_turbo") or
            (base_model_name_value == "sd_1_5_runwayml")
        ):

            base_model_kwargs["variant"] = "fp16"

        base_model_kwargs["torch_dtype"] = torch.float16

    if use_custom_hugging_face_cache_dir == 1:

        base_model_kwargs["cache_dir"] = hugging_face_cache_dir

    pipe = DiffusionPipeline.from_pretrained(
        model_configuration_links_object[model_configuration_name_value],
        **base_model_kwargs
    )

    if use_model_cpu_offload_for_base_model == 1:
        pipe.enable_model_cpu_offload()

    if use_xformers == 1:
        pipe.enable_xformers_memory_efficient_attention()

    pipe = pipe.to(device)

    if use_sequential_cpu_offload_for_base_model == 1:
        pipe.enable_sequential_cpu_offload()

    if use_default_attn_processor == 1:
        pipe.unet.set_default_attn_processor()

    if device == "cuda":
        torch.cuda.empty_cache()
    else:
        # On CPU, compile the UNet instead of clearing CUDA memory.
        pipe.unet = torch.compile(
            pipe.unet,
            mode = "reduce-overhead",
            fullgraph = True
        )

    return (
        pipe
    )

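# A minimal sketch of how construct_pipe is meant to be used (the
# "sdxl_turbo" values below are just one valid combination from the
# tables above, matching this app's Turbo defaults):
#
#     pipe = construct_pipe("sdxl_turbo", "sdxl_turbo_default")
#     image = pipe("a photo of a cat", num_inference_steps = 2, guidance_scale = 0).images[0]
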
def configure_scheduler (
    pipe,
    scheduler_value
):

    scheduler_config = pipe.scheduler.config

    scheduler = scheduler_value

    if scheduler_value == "model_default":

        # Resolve "model_default" to the scheduler class the pipeline
        # was loaded with.
        scheduler_name = pipe.scheduler.config._class_name

        if scheduler_name in scheduler_name_to_identifier_in_app_object:

            scheduler = scheduler_name_to_identifier_in_app_object[scheduler_name]

    scheduler_used = scheduler

    if scheduler == "ddim":

        from diffusers import DDIMScheduler
        pipe.scheduler = DDIMScheduler.from_config(scheduler_config)

    elif scheduler == "ddpm":

        from diffusers import DDPMScheduler
        pipe.scheduler = DDPMScheduler.from_config(scheduler_config)

    elif scheduler == "dpm_solver_multistep":

        from diffusers import DPMSolverMultistepScheduler
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config)

    elif scheduler == "dpm_solver_multistep_karras_sigmas_true":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"use_karras_sigmas": True})

        from diffusers import DPMSolverMultistepScheduler
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config)

    elif scheduler == "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"})

        from diffusers import DPMSolverMultistepScheduler
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config)

    elif scheduler == "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"use_karras_sigmas": True})
        new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"})

        from diffusers import DPMSolverMultistepScheduler
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config)

    elif scheduler == "dpm_solver_singlestep":

        from diffusers import DPMSolverSinglestepScheduler
        pipe.scheduler = DPMSolverSinglestepScheduler.from_config(scheduler_config)

    elif scheduler == "dpm_solver_singlestep_karras_sigmas_true":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"use_karras_sigmas": True})

        from diffusers import DPMSolverSinglestepScheduler
        pipe.scheduler = DPMSolverSinglestepScheduler.from_config(new_scheduler_config)

    elif scheduler == "kdpm2_discrete":

        from diffusers import KDPM2DiscreteScheduler
        pipe.scheduler = KDPM2DiscreteScheduler.from_config(scheduler_config)

    elif scheduler == "kdpm2_discrete_karras_sigmas_true":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"use_karras_sigmas": True})

        from diffusers import KDPM2DiscreteScheduler
        pipe.scheduler = KDPM2DiscreteScheduler.from_config(new_scheduler_config)

    elif scheduler == "kdpm2_ancestral_discrete":

        from diffusers import KDPM2AncestralDiscreteScheduler
        pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(scheduler_config)

    elif scheduler == "kdpm2_ancestral_discrete_karras_sigmas_true":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"use_karras_sigmas": True})

        from diffusers import KDPM2AncestralDiscreteScheduler
        pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(new_scheduler_config)

    elif scheduler == "euler_discrete":

        from diffusers import EulerDiscreteScheduler
        pipe.scheduler = EulerDiscreteScheduler.from_config(scheduler_config)

    elif scheduler == "euler_ancestral_discrete":

        from diffusers import EulerAncestralDiscreteScheduler
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config)

    elif scheduler == "heun_discrete":

        from diffusers import HeunDiscreteScheduler
        pipe.scheduler = HeunDiscreteScheduler.from_config(scheduler_config)

    elif scheduler == "lms_discrete":

        from diffusers import LMSDiscreteScheduler
        pipe.scheduler = LMSDiscreteScheduler.from_config(scheduler_config)

    elif scheduler == "lms_discrete_karras_sigmas_true":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"use_karras_sigmas": True})

        from diffusers import LMSDiscreteScheduler
        pipe.scheduler = LMSDiscreteScheduler.from_config(new_scheduler_config)

    elif scheduler == "pndm":

        from diffusers import PNDMScheduler
        pipe.scheduler = PNDMScheduler.from_config(scheduler_config)

    elif scheduler == "pndm_skip_prk_steps_true":

        new_scheduler_config = dict(pipe.scheduler.config)
        new_scheduler_config.update({"skip_prk_steps": True})

        from diffusers import PNDMScheduler
        pipe.scheduler = PNDMScheduler.from_config(new_scheduler_config)

    elif scheduler == "deis_multistep":

        from diffusers import DEISMultistepScheduler
        pipe.scheduler = DEISMultistepScheduler.from_config(scheduler_config)

    elif scheduler == "dpm_solver_sde":

        from diffusers import DPMSolverSDEScheduler
        pipe.scheduler = DPMSolverSDEScheduler.from_config(scheduler_config)

    elif scheduler == "uni_pc_multistep":

        from diffusers import UniPCMultistepScheduler
        pipe.scheduler = UniPCMultistepScheduler.from_config(scheduler_config)

    else:

        # Fall back to PNDM for any unrecognized value.
        from diffusers import PNDMScheduler
        pipe.scheduler = PNDMScheduler.from_config(scheduler_config)

        scheduler_used = "pndm"

    return (
        scheduler_used
    )

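# Each scheduler class is imported inside its own branch so that only
# the class actually selected gets loaded. from_config() rebuilds the
# scheduler from the pipeline's existing scheduler configuration, with
# any overrides (such as use_karras_sigmas) applied to a copy first.
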
def construct_refiner ():

    refiner_kwargs = {
        "use_safetensors": True
    }

    if device == "cuda":

        refiner_kwargs["variant"] = "fp16"
        refiner_kwargs["torch_dtype"] = torch.float16

    if use_custom_hugging_face_cache_dir == 1:

        refiner_kwargs["cache_dir"] = hugging_face_cache_dir

    refiner = DiffusionPipeline.from_pretrained(
        hugging_face_refiner_partial_path,
        **refiner_kwargs
    )

    if use_model_cpu_offload_for_refiner == 1:

        refiner.enable_model_cpu_offload()

    if use_xformers == 1:

        refiner.enable_xformers_memory_efficient_attention()

    refiner = refiner.to(device)

    if use_sequential_cpu_offload_for_refiner == 1:

        refiner.enable_sequential_cpu_offload()

    if use_default_attn_processor == 1:

        refiner.unet.set_default_attn_processor()

    if device == "cuda":
        torch.cuda.empty_cache()
    else:
        refiner.unet = torch.compile(
            refiner.unet,
            mode = "reduce-overhead",
            fullgraph = True
        )

    return (
        refiner
    )

def construct_upscaler ():

    upscaler_kwargs = {
        "use_safetensors": True
    }

    if device == "cuda":

        upscaler_kwargs["torch_dtype"] = torch.float16

    if use_custom_hugging_face_cache_dir == 1:

        upscaler_kwargs["cache_dir"] = hugging_face_cache_dir

    upscaler = DiffusionPipeline.from_pretrained(
        hugging_face_upscaler_partial_path,
        **upscaler_kwargs
    )

    if use_model_cpu_offload_for_upscaler == 1:

        upscaler.enable_model_cpu_offload()

    if use_xformers == 1:

        upscaler.enable_xformers_memory_efficient_attention()

    upscaler = upscaler.to(device)

    if use_sequential_cpu_offload_for_upscaler == 1:

        upscaler.enable_sequential_cpu_offload()

    if use_default_attn_processor == 1:

        upscaler.unet.set_default_attn_processor()

    if device == "cuda":
        torch.cuda.empty_cache()
    else:
        upscaler.unet = torch.compile(
            upscaler.unet,
            mode = "reduce-overhead",
            fullgraph = True
        )

    return (
        upscaler
    )

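# construct_pipe, construct_refiner and construct_upscaler all follow
# the same pattern: build the from_pretrained keyword arguments, then
# apply the offload, xFormers and attention-processor settings, and
# compile the UNet when running on CPU.
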
def update_prompt_info_from_gallery (
    gallery_data: gr.SelectData,
    image_gallery_array_state_value
):

    # When a gallery image is selected, show the generation details that
    # were stored for it.
    gallery_data_index = gallery_data.index

    output_image_gallery_field_update = gr.Gallery(
        selected_index = gallery_data_index
    )

    output_text_field_update = image_gallery_array_state_value[gallery_data_index]

    return {
        output_image_gallery_field: output_image_gallery_field_update,
        output_text_field: output_text_field_update
    }

def before_create_image_function ():

    generate_image_btn_update = gr.Button(
        value = "Generating...",
        variant = "secondary",
        interactive = False
    )

    output_text_field_update = gr.Textbox(
        visible = False
    )

    return {
        generate_image_btn: generate_image_btn_update,
        output_text_field: output_text_field_update
    }

def after_create_image_function ():

    generate_image_btn_update = gr.Button(
        value = "Generate",
        variant = "primary",
        interactive = True
    )

    output_text_field_update = gr.Textbox(
        visible = True
    )

    return {
        generate_image_btn: generate_image_btn_update,
        output_text_field: output_text_field_update
    }

def create_image_function (

    base_model_field_index,
    prompt_text,
    negative_prompt_text,

    scheduler_index,

    image_width,
    image_height,
    guidance_scale,
    base_model_num_inference_steps,
    base_model_num_inference_steps_field_for_sdxl_turbo,
    actual_seed,
    add_seed_into_pipe,

    refining_selection_default_config_field_value,
    refining_selection_online_config_normal_field_value,
    refining_selection_online_config_automatically_selected_field_value,

    refining_denoise_start_for_default_config_field_value,
    refining_use_denoising_start_in_base_model_when_using_refiner_field_value,
    refining_base_model_output_to_refiner_is_in_latent_space_field_value,

    refining_denoise_start_for_online_config_field_value,
    refining_number_of_iterations_for_online_config_field_value,

    upscaling_selection_field_value,
    upscaling_num_inference_steps,

    image_gallery_array_state_value,
    prompt_information_array_state_value,

    last_model_configuration_name_selected_state_value,
    last_refiner_name_selected_state_value,
    last_upscaler_name_selected_state_value,

    stored_pipe_state,
    stored_refiner_state,
    stored_upscaler_state,

    *model_configuration_dropdown_fields_array,

    progress = gr.Progress()

):

    position_in_array = 0

    model_configuration_field_object = {}

    # Map each base model to the dropdown index selected for it.
    for model_configuration_field_index in model_configuration_dropdown_fields_array:

        this_base_model = base_model_array[position_in_array]

        model_configuration_field_object[this_base_model] = model_configuration_field_index

        position_in_array += 1

    add_seed_into_pipe = numerical_bool(add_seed_into_pipe)

    refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value)
    refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
    refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)

    refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value)
    refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value)

    use_upscaler = numerical_bool(upscaling_selection_field_value)

    base_model_name_value = base_model_array[base_model_field_index]

    model_configuration_field_index = model_configuration_field_object[base_model_name_value]
    model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index]

    scheduler_value = schedulers_array[scheduler_index]

    current_actual_total_base_model_steps = base_model_num_inference_steps

    current_actual_total_refiner_steps = 0

    is_default_config_state = 1

    if model_configuration_name_value in online_configurations_object:

        is_default_config_state = 0

    # Use the refiner when the refining option that matches this type of
    # configuration (default, normal online, or online with a forced
    # refiner) is selected.
    use_refiner = 0

    if (
        (
            (is_default_config_state == 1) and
            refining_selection_default_config_field_value
        ) or (
            (is_default_config_state != 1) and
            (
                (
                    (model_configuration_name_value not in model_configuration_force_refiner_object) and
                    refining_selection_online_config_normal_field_value
                ) or (
                    (model_configuration_name_value in model_configuration_force_refiner_object) and
                    refining_selection_online_config_automatically_selected_field_value
                )
            )
        )
    ):

        use_refiner = 1

    if base_model_name_value == "sdxl_turbo":

        # SDXL Turbo ignores negative prompts and guidance and uses its
        # own step count.
        negative_prompt_text = ""
        base_model_num_inference_steps = base_model_num_inference_steps_field_for_sdxl_turbo
        current_actual_total_base_model_steps = base_model_num_inference_steps
        guidance_scale = 0

    if (
        (last_model_configuration_name_selected_state_value == "") or
        (model_configuration_name_value != last_model_configuration_name_selected_state_value)
    ):

        if (last_model_configuration_name_selected_state_value != ""):

            # Free any module-level pipe before loading a new one.
            # ("del pipe" alone would target the local variable and
            # fail, since "pipe" is assigned below.)
            if "pipe" in globals():
                del globals()["pipe"]

        progress(
            progress = 0,
            desc = "Base model is loading"
        )

        (
            pipe
        ) = construct_pipe(
            base_model_name_value,
            model_configuration_name_value
        )

        last_model_configuration_name_selected_state_value = model_configuration_name_value

    else:

        pipe = stored_pipe_state

    (
        scheduler_used
    ) = configure_scheduler(
        pipe,
        scheduler_value
    )

    if use_refiner == 1:

        if (last_refiner_name_selected_state_value == ""):

            progress(
                progress = 0,
                desc = "Refiner is loading"
            )

            refiner = construct_refiner()

            last_refiner_name_selected_state_value = "refiner"

        else:

            refiner = stored_refiner_state

    else:

        refiner = {}

    if use_upscaler == 1:

        if (last_upscaler_name_selected_state_value == ""):

            progress(
                progress = 0,
                desc = "Upscaler is loading"
            )

            upscaler = construct_upscaler()

            last_upscaler_name_selected_state_value = "upscaler"

        else:

            upscaler = stored_upscaler_state

    else:

        upscaler = ""

    if log_generation_times == 1:

        start_time = time.time()

    tokenizer = pipe.tokenizer

    max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens

    token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text))
    token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text))

    prompt_text_not_used_substring = ""

    message_about_prompt_truncation = ""

    if token_length_of_prompt_text > max_token_length_of_model:

        (
            prompt_text,
            prompt_text_not_used_substring
        ) = truncate_prompt(
            pipe,
            prompt_text
        )

        message_about_prompt_truncation += "Your prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + prompt_text_not_used_substring + "</span>"

    negative_prompt_text_not_used_substring = ""

    if token_length_of_negative_prompt_text > max_token_length_of_model:

        (
            negative_prompt_text,
            negative_prompt_text_not_used_substring
        ) = truncate_prompt(
            pipe,
            negative_prompt_text
        )

        if len(message_about_prompt_truncation) > 0:

            message_about_prompt_truncation += "<br/><br/>"

        message_about_prompt_truncation += "Your negative prompt has been truncated because it is too long. This part has been truncated:<br/><br/><span style=\"font-style: italic;\">" + negative_prompt_text_not_used_substring + "</span>"

    prompt_truncated_field_update = gr.HTML(
        value = "",
        visible = False
    )

    if len(message_about_prompt_truncation) > 0:

        prompt_truncated_field_update = gr.HTML(
            value = "<div style=\"padding: 10px; background: #fff;\"><span style=\"font-weight: bold;\">Note</span>: " + message_about_prompt_truncation + "</div>",
            visible = True
        )

        show_message("Note: Part of your prompt has been truncated automatically because it was too long.")

    actual_seed = int(actual_seed)

    if actual_seed == 0:

        # Pick a random seed. Cap the range at 2**53 so the value
        # survives the round trip through the browser-side number field.
        default_seed_maximum_for_random = default_seed_maximum

        if default_seed_maximum_for_random > 9007199254740992:

            default_seed_maximum_for_random = 9007199254740992

        actual_seed = int(random.randrange(1, default_seed_maximum_for_random))

    if seed_not_valid(actual_seed):

        raise Exception("Seed is not valid.")

    generator = torch.manual_seed(actual_seed)

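    # torch.manual_seed seeds the default random number generator and
    # returns it, so generation stays reproducible even when the
    # generator object itself is not passed into the pipeline below.
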
    if show_image_creation_progress_log == 1:

        current_base_model_generation_start_time = 0

        def callback_function_for_base_model_progress(
            callback_pipe,
            callback_step_index,
            callback_timestep,
            callback_kwargs
        ):

            # This timer variable lives in the enclosing function, so it
            # needs "nonlocal" rather than "global".
            nonlocal current_base_model_generation_start_time

            if int(callback_step_index) == 0:

                current_base_model_generation_start_time = time.time()

            if int(callback_step_index) > 0:

                seconds_per_step = ((time.time() - current_base_model_generation_start_time) / int(callback_step_index))

                (
                    time_per_step_hours,
                    time_per_step_minutes,
                    time_per_step_seconds
                ) = convert_seconds(seconds_per_step)

                if time_per_step_hours > 0:

                    hours_text = "hr"

                    if time_per_step_hours > 1:

                        hours_text = "hrs"

                    nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."

                elif time_per_step_minutes > 0:

                    nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."

                else:

                    nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds"

                base_model_progress_text = nice_time_per_step + " per step"

            else:

                base_model_progress_text = "Base model processing started"

            progress(
                progress = (
                    callback_step_index,
                    current_actual_total_base_model_steps
                ),
                desc = base_model_progress_text,
                unit = "base model steps"
            )

            return {}

        callback_to_do_for_base_model_progress = callback_function_for_base_model_progress

        current_refiner_generation_start_time = 0

        def callback_function_for_refiner_progress(
            callback_pipe,
            callback_step_index,
            callback_timestep,
            callback_kwargs
        ):

            # This timer variable also lives in the enclosing function.
            nonlocal current_refiner_generation_start_time

            if int(callback_step_index) == 0:

                current_refiner_generation_start_time = time.time()

            if int(callback_step_index) > 0:

                seconds_per_step = ((time.time() - current_refiner_generation_start_time) / int(callback_step_index))

                (
                    time_per_step_hours,
                    time_per_step_minutes,
                    time_per_step_seconds
                ) = convert_seconds(seconds_per_step)

                if time_per_step_hours > 0:

                    hours_text = "hr"

                    if time_per_step_hours > 1:

                        hours_text = "hrs"

                    nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."

                elif time_per_step_minutes > 0:

                    nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."

                else:

                    nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds"

                refiner_progress_text = nice_time_per_step + " per step"

            else:

                refiner_progress_text = "Refiner processing started"

            progress(
                progress = (
                    callback_step_index,
                    current_actual_total_refiner_steps
                ),
                desc = refiner_progress_text,
                unit = "refiner steps"
            )

            return {}

        callback_to_do_for_refiner_progress = callback_function_for_refiner_progress

    else:

        callback_to_do_for_base_model_progress = None
        callback_to_do_for_refiner_progress = None

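    # Both callbacks use the callback_on_step_end interface from
    # diffusers: they receive the pipeline, the step index, the timestep
    # and a kwargs dict, and must return a (possibly empty) dict of
    # values to carry into the next step.
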
    is_sdxl_online_config = 0
    is_photoreal_online_config = 0

    if (
        model_configuration_name_value == "sdxl_2023-11-12" or
        model_configuration_name_value == "sdxl_2023-09-05"
    ):

        is_sdxl_online_config = 1

    elif (
        model_configuration_name_value == "photoreal_2023-11-12" or
        model_configuration_name_value == "photoreal_2023-09-01"
    ):

        is_photoreal_online_config = 1

    if (
        (is_sdxl_online_config == 1) or
        (is_photoreal_online_config == 1)
    ):

        # These short variable names match the online scripts this
        # configuration tries to reproduce.
        prompt = prompt_text
        negative_prompt = negative_prompt_text
        width = image_width
        height = image_height
        scale = guidance_scale
        steps = base_model_num_inference_steps
        refining = use_refiner
        if refining == 1:
            refining = "Yes"
        upscaling = use_upscaler
        if upscaling == 1:
            upscaling = "Yes"

        prompt_2 = ""
        negative_prompt_2 = ""

        high_noise_frac = refining_denoise_start_for_online_config_field_value

        if is_sdxl_online_config == 1:

            add_seed_into_pipe = 1

            n_steps = refining_number_of_iterations_for_online_config_field_value

            upscaling_num_inference_steps = 15

            if model_configuration_name_value == "sdxl_2023-09-05":

                upscaling_num_inference_steps = 5

            if show_messages_in_command_prompt == 1:

                print ("Initial image creation has begun.")

            if show_image_creation_progress_log == 1:

                progress(
                    progress = 0,
                    desc = "Initial image creation has begun"
                )

            int_image = pipe(
                prompt,
                prompt_2=prompt_2,
                negative_prompt=negative_prompt,
                negative_prompt_2=negative_prompt_2,
                num_inference_steps=steps,
                height=height,
                width=width,
                guidance_scale=scale,
                num_images_per_prompt=1,
                generator=generator,
                output_type="latent",
                callback_on_step_end=callback_to_do_for_base_model_progress
            ).images

            if show_messages_in_command_prompt == 1:

                print ("Refiner steps...")

            if show_image_creation_progress_log == 1:

                progress(
                    progress = 0,
                    desc = "Refining is beginning"
                )

            current_actual_total_refiner_steps = int(int(n_steps) * float(high_noise_frac))

            nice_refiner_denoise_start = str(refining_denoise_start_for_online_config_field_value)

            refiner_info_for_info_about_prompt_lines_array = [
                "Refiner? Yes",
                "Refiner denoise start %: " + nice_refiner_denoise_start,
                "Refiner number of iterations: " + str(refining_number_of_iterations_for_online_config_field_value),
                "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
            ]

            image = refiner(
                prompt=prompt,
                prompt_2=prompt_2,
                negative_prompt=negative_prompt,
                negative_prompt_2=negative_prompt_2,
                image=int_image,
                num_inference_steps=n_steps,
                denoising_start=high_noise_frac,
                callback_on_step_end=callback_to_do_for_refiner_progress
            ).images[0]

            if upscaling == "Yes":

                if show_messages_in_command_prompt == 1:

                    print ("Upscaler steps...")

                if show_image_creation_progress_log == 1:

                    progress(
                        progress = 0,
                        desc = "Upscaling in progress (no steps shown)"
                    )

                upscaled = upscaler(
                    prompt=prompt,
                    negative_prompt=negative_prompt,
                    image=image,
                    num_inference_steps=upscaling_num_inference_steps,
                    guidance_scale=0
                ).images[0]

                if device == "cuda":
                    torch.cuda.empty_cache()

                image_to_return = upscaled

            else:

                if device == "cuda":
                    torch.cuda.empty_cache()

                image_to_return = image

        elif is_photoreal_online_config == 1:

            add_seed_into_pipe = 0

            # These names match the PhotoReal online script, where
            # "upscale" actually refers to running the refiner.
            Prompt = prompt
            upscale = refining

            if upscale == "Yes":

                if show_messages_in_command_prompt == 1:

                    print ("Initial image creation has begun.")

                if show_image_creation_progress_log == 1:

                    progress(
                        progress = 0,
                        desc = "Initial image creation has begun"
                    )

                int_image = pipe(
                    Prompt,
                    negative_prompt=negative_prompt,
                    height=height,
                    width=width,
                    num_inference_steps=steps,
                    guidance_scale=scale,
                    callback_on_step_end=callback_to_do_for_base_model_progress
                ).images

                if show_messages_in_command_prompt == 1:

                    print ("Refiner steps...")

                if show_image_creation_progress_log == 1:

                    progress(
                        progress = 0,
                        desc = "Refining is beginning"
                    )

                default_steps_in_diffusers = 50

                current_actual_total_refiner_steps = int(default_steps_in_diffusers * float(high_noise_frac))

                nice_refiner_denoise_start = str(high_noise_frac)

                refiner_info_for_info_about_prompt_lines_array = [
                    "Refiner? Yes",
                    "Refiner denoise start %: " + nice_refiner_denoise_start,
                    "Refiner number of iterations: " + str(current_actual_total_refiner_steps),
                    "Actual Refining Steps: " + str(current_actual_total_refiner_steps)
                ]

                image = refiner(
                    Prompt,
                    negative_prompt=negative_prompt,
                    image=int_image,
                    num_inference_steps=default_steps_in_diffusers,
                    denoising_start=high_noise_frac,
                    callback_on_step_end=callback_to_do_for_refiner_progress
                ).images[0]

            else:

                if show_messages_in_command_prompt == 1:

                    print ("Image creation has begun.")

                if show_image_creation_progress_log == 1:

                    progress(
                        progress = 0,
                        desc = "Image creation has begun"
                    )

                image = pipe(
                    Prompt,
                    negative_prompt=negative_prompt,
                    height=height,
                    width=width,
                    num_inference_steps=steps,
                    guidance_scale=scale,
                    callback_on_step_end=callback_to_do_for_base_model_progress
                ).images[0]

            image_to_return = image

|
else: |
|
|
|
|
|
|
|
if add_seed_into_pipe == 0: |
|
|
|
generator = None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if use_refiner == 1: |
|
|
|
if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1: |
|
|
|
denoising_end = refining_denoise_start_for_default_config_field_value |
|
|
|
current_actual_total_base_model_steps = int(base_model_num_inference_steps * float(refining_denoise_start_for_default_config_field_value)) |
|
|
|
else: |
|
|
|
denoising_end = None |
|
|
|
output_type_before_refiner = "pil" |
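# Passing the base model output to the refiner as latents skips a VAE
# decode/encode round trip between the two pipelines.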
|
|
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: |
|
|
|
output_type_before_refiner = "latent" |
|
|
|
current_actual_total_refiner_steps = (base_model_num_inference_steps - int(base_model_num_inference_steps * float(refining_denoise_start_for_default_config_field_value))) |
|
|
|
refiner_info_for_info_about_prompt_lines_array = [ |
|
"Refiner? Yes" |
|
] |
|
|
|
nice_refiner_denoise_start = str(refining_denoise_start_for_default_config_field_value)
|
|
|
if refining_use_denoising_start_in_base_model_when_using_refiner_field_value == 1: |
|
|
|
refiner_info_for_info_about_prompt_lines_array.extend([ |
|
"Set \"denoising_end\" in base model generation? Yes", |
|
"Base model denoise end %: " + nice_refiner_denoise_start, |
|
"Actual Base Model Steps: " + str(current_actual_total_base_model_steps) |
|
]) |
|
|
|
else: |
|
|
|
refiner_info_for_info_about_prompt_lines_array.extend([ |
|
"Set \"denoising_end\" in base model generation? No", |
|
]) |
|
|
|
refiner_info_for_info_about_prompt_lines_array.extend([ |
|
"Refiner denoise start %: " + nice_refiner_denoise_start, |
|
"Actual Refining Steps: " + str(current_actual_total_refiner_steps) |
|
]) |
|
|
|
if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: |
|
|
|
refiner_info_for_info_about_prompt_lines_array.extend([ |
|
"Base model output in latent space before refining? Yes", |
|
]) |
|
|
|
else: |
|
|
|
refiner_info_for_info_about_prompt_lines_array.extend([ |
|
"Base model output in latent space before refining? No", |
|
]) |
|
|
|
if use_upscaler == 1: |
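# Base model, then refiner, then the 2x upscaler.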
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Will create initial image, then refine and then upscale."); |
|
print ("Initial image steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Initial image creation has begun" |
|
) |
|
|
|
initial_image = pipe(
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
denoising_end = denoising_end, |
|
output_type = output_type_before_refiner, |
|
callback_on_step_end = callback_to_do_for_base_model_progress |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Refiner steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Refining is beginning" |
|
) |
|
|
|
refined_image = refiner( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
image = initial_image,
|
num_inference_steps = base_model_num_inference_steps, |
|
denoising_start = refining_denoise_start_for_default_config_field_value, |
|
output_type = "pil", |
|
callback_on_step_end = callback_to_do_for_refiner_progress |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Upscaler steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Upscaling in progress (no steps shown)" |
|
) |
|
|
|
upscaled_image = upscaler( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
image = refined_image, |
|
num_inference_steps = upscaling_num_inference_steps, |
|
guidance_scale = 0 |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = upscaled_image |
|
|
|
else: |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Will create initial image and then refine."); |
|
print ("Initial image steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Initial image creation has begun" |
|
) |
|
|
|
initial_image = pipe(
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
denoising_end = denoising_end, |
|
output_type = output_type_before_refiner, |
|
callback_on_step_end = callback_to_do_for_base_model_progress |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Refiner steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Refining is beginning" |
|
) |
|
|
|
refined_image = refiner( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
image = initial_image,
|
num_inference_steps = base_model_num_inference_steps, |
|
denoising_start = refining_denoise_start_for_default_config_field_value, |
|
callback_on_step_end = callback_to_do_for_refiner_progress |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = refined_image |
|
|
|
else: |
|
|
|
if use_upscaler == 1: |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Will create initial image and then upscale."); |
|
print ("Initial image steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Initial image creation has begun" |
|
) |
|
|
|
initial_image = pipe(
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
output_type = "pil", |
|
callback_on_step_end = callback_to_do_for_base_model_progress |
|
).images |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Upscaler steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Upscaling in progress (no steps shown)" |
|
) |
|
|
|
upscaled_image = upscaler( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
image = initial_image,
|
num_inference_steps = upscaling_num_inference_steps, |
|
guidance_scale = 0 |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = upscaled_image |
|
|
|
else: |
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Will create image (no refining or upscaling)."); |
|
print ("Image steps..."); |
|
|
|
if show_image_creation_progress_log == 1: |
|
|
|
progress( |
|
progress = 0, |
|
desc = "Image creation has begun" |
|
) |
|
|
|
image = pipe( |
|
prompt = prompt_text, |
|
negative_prompt = negative_prompt_text, |
|
width = image_width, |
|
height = image_height, |
|
num_inference_steps = base_model_num_inference_steps, |
|
guidance_scale = guidance_scale, |
|
num_images_per_prompt = 1, |
|
generator = generator, |
|
callback_on_step_end = callback_to_do_for_base_model_progress |
|
).images[0] |
|
|
|
if device == "cuda": |
|
torch.cuda.empty_cache() |
|
|
|
image_to_return = image |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
nice_model_name = base_model_names_object[base_model_name_value] + " (" + model_configuration_links_object[model_configuration_name_value] + ")" |
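# Assemble the human-readable generation details that are shown in the
# prompt information box and saved alongside the image.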
|
|
|
info_about_prompt_lines_array = [ |
|
"Prompt:\n" + prompt_text |
|
] |
|
|
|
if len(negative_prompt_text) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Negative Prompt:\n" + negative_prompt_text |
|
]) |
|
|
|
dimensions_title = "Dimensions" |
|
|
|
if use_upscaler == 1: |
|
|
|
dimensions_title = "Original Dimensions" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
dimensions_title + ": " + str(image_width) + "x" + str(image_height) + " px" |
|
]) |
|
|
|
if use_upscaler == 1: |
|
|
|
upscaled_image_width = int(image_width * 2) |
|
upscaled_image_height = int(image_height * 2) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px" |
|
]) |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Seed: " + str(actual_seed) |
|
]) |
|
|
|
nice_seed_added_to_generation = "No" |
|
|
|
if add_seed_into_pipe == 1: |
|
|
|
nice_seed_added_to_generation = "Yes" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Seed added to generation? " + nice_seed_added_to_generation |
|
]) |
|
|
|
if int(guidance_scale) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Guidance Scale: " + str(guidance_scale) |
|
]) |
|
|
|
nice_scheduler_name = scheduler_short_names_object[scheduler_used] |
|
|
|
if scheduler_value == "model_default": |
|
|
|
nice_scheduler_name += " (model default)" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Steps: " + str(base_model_num_inference_steps), |
|
"Model: " + nice_model_name, |
|
"Scheduler/Sampler: " + nice_scheduler_name |
|
]) |
|
|
|
if use_refiner == 1: |
|
|
|
|
|
|
|
info_about_prompt_lines_array.extend(refiner_info_for_info_about_prompt_lines_array) |
|
|
|
if use_upscaler == 1: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Upscaled (2x)? Yes", |
|
"Refiner Steps: " + str(upscaling_num_inference_steps) |
|
]) |
|
|
|
if log_generation_times == 1: |
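# Convert the elapsed time into hours, minutes and seconds for display.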
|
|
|
end_time = time.time() |
|
|
|
generation_time_in_seconds = (end_time - start_time) |
|
|
|
( |
|
generation_partial_hours, |
|
generation_partial_minutes, |
|
generation_partial_seconds |
|
) = convert_seconds(generation_time_in_seconds) |
|
|
|
if generation_partial_hours > 0: |
|
|
|
hours_text = "hr" |
|
|
|
if generation_partial_hours > 1: |
|
|
|
hours_text = "hrs" |
|
|
|
nice_generation_time = str(int(generation_partial_hours)) + " " + hours_text + ". " + str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec." |
|
|
|
elif generation_partial_minutes > 0: |
|
|
|
nice_generation_time = str(int(generation_partial_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec." |
|
|
|
else: |
|
|
|
nice_generation_time = str(round(generation_time_in_seconds, 2)) + " sec." |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Time: " + nice_generation_time |
|
]) |
|
|
|
|
|
|
|
if len(prompt_text_not_used_substring) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"End of Prompt Truncated: " + prompt_text_not_used_substring |
|
]) |
|
|
|
if len(negative_prompt_text_not_used_substring) > 0: |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"End of Negative Prompt Truncated: " + negative_prompt_text_not_used_substring |
|
]) |
|
|
|
|
|
|
|
if display_xformers_usage_in_prompt_info > 0: |
|
|
|
nice_xformers_usage = "No" |
|
|
|
if use_xformers == 1: |
|
|
|
nice_xformers_usage = "Yes" |
|
|
|
if include_transformers_version_in_prompt_info == 1: |
|
|
|
import transformers |
|
|
|
nice_xformers_usage += " (version " + str(transformers.__version__) + ")" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"xFormers Used?: " + nice_xformers_usage |
|
]) |
|
|
|
if display_default_attn_processor_usage_in_prompt_info > 0: |
|
|
|
nice_default_attn_processor_usage = "No" |
|
|
|
if use_default_attn_processor == 1: |
|
|
|
nice_default_attn_processor_usage = "Yes" |
|
|
|
info_about_prompt_lines_array.extend([ |
|
"Default AttnProcessor Used? " + nice_default_attn_processor_usage |
|
]) |
|
|
|
|
|
|
|
output_text_field_update = '\n'.join(info_about_prompt_lines_array) |
|
|
|
|
|
|
|
if auto_save_imagery == 1: |
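# Save the image into a dated subfolder under the first unused
# zero-padded counter, along with a matching .txt file containing the
# generation details.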
|
|
|
|
|
|
|
if not os.path.exists(saved_images_dir): |
|
os.makedirs(saved_images_dir) |
|
|
|
yy_mm_dd_date_stamp = datetime.today().strftime('%Y-%m-%d') |
|
|
|
saved_images_date_dir = saved_images_dir + "/" + yy_mm_dd_date_stamp + "/" |
|
|
|
if not os.path.exists(saved_images_date_dir): |
|
os.makedirs(saved_images_date_dir) |
|
|
|
image_count = 1

file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)

saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"

while os.path.exists(saved_image_path_and_file):

image_count += 1

file_name_without_extension = yy_mm_dd_date_stamp + "-" + ('%04d' % image_count)

saved_image_path_and_file = saved_images_date_dir + file_name_without_extension + ".png"
|
|
|
image_to_return.save(saved_image_path_and_file)
|
|
|
saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt" |
|
|
|
with open(saved_text_file_path_and_file, "w") as prompt_info_file_handle:

prompt_info_file_handle.write(output_text_field_update)
|
|
|
|
|
|
|
output_image_field_update = gr.Image( |
|
value = image_to_return |
|
) |
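# Newest image goes first in the gallery; its details are stored at the
# same index so selecting a thumbnail can recall them.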
|
|
|
image_gallery_array_state_value.insert(0, image_to_return) |
|
prompt_information_array_state_value.insert(0, output_text_field_update) |
|
|
|
output_image_gallery_field_update = gr.Gallery( |
|
value = image_gallery_array_state_value, |
|
selected_index = 0 |
|
) |
|
|
|
image_gallery_array_state_update = image_gallery_array_state_value |
|
|
|
prompt_information_array_state_update = prompt_information_array_state_value |
|
|
|
|
|
|
|
if show_messages_in_command_prompt == 1: |
|
|
|
print ("Image created.") |
|
|
|
|
|
|
|
last_model_configuration_name_selected_state_update = last_model_configuration_name_selected_state_value |
|
last_refiner_name_selected_state_update = last_refiner_name_selected_state_value |
|
last_upscaler_name_selected_state_update = last_upscaler_name_selected_state_value |
|
|
|
|
|
|
|
return ( |
|
output_image_field_update, |
|
output_image_gallery_field_update, |
|
output_text_field_update, |
|
prompt_truncated_field_update, |
|
last_model_configuration_name_selected_state_update, |
|
last_refiner_name_selected_state_update, |
|
last_upscaler_name_selected_state_update, |
|
pipe, |
|
refiner, |
|
upscaler |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def cancel_image_processing(): |
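# Windows-only: retitle the console window, then kill it by title,
# which also stops any image generation in progress.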
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
gr.Warning("The command prompt window has been closed. Any image generation in progress has been stopped. To generate any other images, you will need to launch the command prompt again.") |
|
|
|
os.system('title kill_window') |
|
|
|
os.system('taskkill /f /fi "WINDOWTITLE eq kill_window"')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
css_to_use = "footer{display:none !important}" |
|
|
|
with gr.Blocks( |
|
title = "AI Image Creation", |
|
css = css_to_use, |
|
theme = gr.themes.Default( |
|
spacing_size = gr.themes.sizes.spacing_md, |
|
|
|
radius_size = gr.themes.sizes.radius_none |
|
), |
|
analytics_enabled = False |
|
) as sd_interface: |
|
|
|
|
|
|
|
image_gallery_array_state = gr.State([]) |
|
|
|
prompt_information_array_state = gr.State([]) |
|
|
|
last_model_configuration_name_selected_state = gr.State("") |
|
last_refiner_name_selected_state = gr.State("") |
|
last_upscaler_name_selected_state = gr.State("") |
|
|
|
stored_pipe_state = gr.State({}) |
|
stored_refiner_state = gr.State({}) |
|
stored_upscaler_state = gr.State({}) |
|
|
|
gr.Markdown(opening_html) |
|
|
|
with gr.Row(): |
|
|
|
with gr.Column(scale = 1): |
|
|
|
generate_image_btn = gr.Button( |
|
value = "Generate", |
|
variant = "primary" |
|
) |
|
|
|
with gr.Group(): |
|
|
|
with gr.Row(): |
|
|
|
prompt_field = gr.Textbox( |
|
label = "Prompt (77 token limit):", |
|
value = default_prompt |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "negative_prompt_field_row_id", |
|
visible = default_negative_prompt_field_row_visibility |
|
): |
|
|
|
negative_prompt_field = gr.Textbox( |
|
label = "Negative Prompt (77 token limit):", |
|
value = default_negative_prompt |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "negative_prompt_for_sdxl_turbo_field_row_id", |
|
visible = default_negative_prompt_for_sdxl_turbo_field_row_visibility |
|
): |
|
|
|
negative_prompt_for_sdxl_turbo_field = gr.HTML( |
|
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Negative prompt is not used for SDXL Turbo.</div>" |
|
) |
|
|
|
with gr.Group( |
|
visible = refiner_group_visible |
|
): |
|
|
|
with gr.Accordion( |
|
elem_id = "refiner_default_config_accordion_id", |
|
label = "Refiner (Default configuration)", |
|
open = refiner_default_config_accordion_open, |
|
visible = refiner_default_config_accordion_visible |
|
) as refiner_default_config_accordion: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
gr.Markdown("This can be used if the image has too much noise.") |
|
|
|
with gr.Row(): |
|
|
|
refining_selection_default_config_field = gr.Radio( |
|
choices = ["Yes", "No"], |
|
value = default_refine_option, |
|
show_label = False, |
|
container = False |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_denoise_start_for_default_config_field = gr.Slider( |
|
label = "Refiner denoise start %", |
|
minimum = 0.7, |
|
maximum = 0.99, |
|
value = 0.95, |
|
step = 0.01 |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_use_denoising_start_in_base_model_when_using_refiner_field = gr.Checkbox( |
|
label = "Use \"denoising_start\" value as \"denoising_end\" value in base model generation when using refiner", |
|
value = default_use_denoising_start_in_base_model_when_using_refiner_is_selected, |
|
interactive = True, |
|
container = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_base_model_output_to_refiner_is_in_latent_space_field = gr.Checkbox( |
|
label = "Base model output in latent space instead of PIL image when using refiner", |
|
value = default_base_model_output_to_refiner_is_in_latent_space_is_selected, |
|
interactive = True, |
|
container = True |
|
) |
|
|
|
with gr.Accordion( |
|
elem_id = "refiner_online_config_accordion_id", |
|
label = "Refiner (Online configuration)", |
|
open = refiner_online_config_accordion_open, |
|
visible = refiner_online_config_accordion_visible |
|
) as refiner_online_config_accordion: |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
refining_selection_automatically_selected_message_field_visible = False |
|
|
|
refining_selection_online_config_normal_field_visible = True |
|
refining_selection_online_config_automatically_selected_field_visible = False |
|
|
|
if model_configuration_requires_refiner == 1: |
|
|
|
refining_selection_automatically_selected_message_field_visible = True |
|
|
|
refining_selection_online_config_normal_field_visible = False |
|
refining_selection_online_config_automatically_selected_field_visible = True |
|
|
|
with gr.Row(): |
|
|
|
refining_selection_automatically_selected_message_field = gr.Markdown( |
|
elem_id = "refining_selection_automatically_selected_message_field_id", |
|
value = "The online configuration you selected automatically uses the refiner.", |
|
visible = refining_selection_automatically_selected_message_field_visible |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_selection_online_config_normal_field = gr.Radio( |
|
elem_id = "refining_selection_online_config_normal_field_id", |
|
choices = ["Yes", "No"], |
|
value = default_refine_option, |
|
show_label = False, |
|
container = False, |
|
visible = refining_selection_online_config_normal_field_visible |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_selection_online_config_automatically_selected_field = gr.Radio( |
|
elem_id = "refining_selection_online_config_automatically_selected_field_id", |
|
choices = ["Yes"], |
|
value = "Yes", |
|
show_label = False, |
|
container = False, |
|
visible = refining_selection_online_config_automatically_selected_field_visible |
|
) |
|
|
|
with gr.Row(): |
|
|
|
refining_denoise_start_for_online_config_field = gr.Slider( |
|
label = "Refiner denoise start %", |
|
minimum = 0.7, |
|
maximum = 0.99, |
|
value = 0.95, |
|
step = 0.01 |
|
) |
|
|
|
refining_number_of_iterations_for_online_config_field_visible = False |
|
|
|
if default_model_configuration in model_configuration_include_refiner_number_of_steps_object: |
|
|
|
refining_number_of_iterations_for_online_config_field_visible = True |
|
|
|
with gr.Row( |
|
elem_id = "refining_number_of_iterations_for_online_config_field_row_id", |
|
visible = refining_number_of_iterations_for_online_config_field_visible |
|
): |
|
|
|
refining_number_of_iterations_for_online_config_field = gr.Slider( |
|
label = "Refiner number of iterations", |
|
minimum = 1, |
|
maximum = 100, |
|
value = 100, |
|
step = 1 |
|
) |
|
|
|
with gr.Group( |
|
visible = upscaler_group_visible |
|
): |
|
|
|
with gr.Accordion( |
|
label = "Upscaler", |
|
elem_id = "upscaler_accordion_id", |
|
open = upscaler_accordion_open, |
|
visible = upscaler_group_visible |
|
): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
gr.Markdown("Upscale by 2x?") |
|
|
|
with gr.Row(): |
|
|
|
upscaling_selection_field = gr.Radio( |
|
choices = ['Yes', 'No'], |
|
value = default_upscale_option, |
|
show_label = False, |
|
container = False |
|
) |
|
|
|
with gr.Row(): |
|
|
|
upscaling_num_inference_steps_field = gr.Slider( |
|
label = "Upscaler number of steps", |
|
minimum = 1, |
|
maximum = maximum_upscaler_steps, |
|
value = default_upscaler_steps, |
|
step = 1 |
|
) |
|
|
|
if ( |
|
(enable_refiner == 1) or |
|
(enable_upscaler == 1) |
|
): |
|
|
|
refiner_and_upscaler_text_field = gr.HTML( |
|
value = "<div id=\"refiner_and_upscaler_info_message_div_id\" style=\"text-align: center;\">" + default_refiner_and_upscaler_status_text + "</div>" |
|
) |
|
|
|
with gr.Column(scale = 1): |
|
|
|
with gr.Group(): |
|
|
|
with gr.Row(): |
|
|
|
base_model_field = gr.Dropdown( |
|
label = "Base Model:", |
|
choices = default_base_model_choices_array, |
|
value = default_base_model_nicely_named_value, |
|
type = "index", |
|
filterable = False, |
|
|
|
interactive = True |
|
) |
|
|
|
model_configuration_dropdown_field_values_for_js = "" |
|
|
|
model_configuration_dropdown_fields_array = [] |
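# One configuration dropdown is created per base model. Only the row
# for the active base model is shown (and only when online
# configurations are allowed); the JavaScript model change handler
# toggles visibility.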
|
|
|
for this_base_model in base_model_array: |
|
|
|
this_model_configuration_choices_array = [] |
|
|
|
for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]: |
|
|
|
this_model_configuration_choices_array.append( |
|
model_configuration_names_object[this_model_configuration] |
|
) |
|
|
|
this_configuration_field_row_visibility = False |
|
|
|
if ( |
|
(this_base_model == default_base_model) and |
|
(allow_online_configurations == 1) |
|
): |
|
|
|
this_configuration_field_row_visibility = True |
|
|
|
this_configuration_field_default_value = model_configuration_names_object[base_model_model_configuration_defaults_object[this_base_model]] |
|
|
|
this_configuration_field_default_value_for_js = this_configuration_field_default_value |
|
this_configuration_field_default_value_for_js = this_configuration_field_default_value_for_js.replace("\"", "\\\"") |
|
|
|
model_configuration_dropdown_field_values_for_js += "\"" + this_base_model + "\": \"" + this_configuration_field_default_value_for_js + "\"," |
|
|
|
with gr.Row( |
|
elem_id = "model_configuration_field_" + this_base_model + "_row_id", |
|
visible = this_configuration_field_row_visibility |
|
): |
|
|
|
this_configuration_field = gr.Dropdown( |
|
label = "Configuration Type:", |
|
choices = this_model_configuration_choices_array, |
|
value = this_configuration_field_default_value, |
|
type = "index", |
|
filterable = False, |
|
|
|
interactive = True |
|
) |
|
|
|
model_configuration_dropdown_fields_array.append(this_configuration_field) |
|
|
|
with gr.Row(): |
|
|
|
scheduler_field = gr.Dropdown( |
|
elem_id = "scheduler_field_id", |
|
label = "Scheduler / Sampler:", |
|
choices = default_scheduler_choices_array, |
|
value = default_scheduler_nicely_named_value, |
|
type = "index", |
|
filterable = False, |
|
|
|
interactive = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
image_width_field = gr.Slider( |
|
label = "Width:", |
|
minimum = 256, |
|
maximum = 1024, |
|
value = default_width, |
|
step = width_and_height_input_slider_steps, |
|
interactive = True |
|
) |
|
|
|
image_height_field = gr.Slider( |
|
label = "Height:", |
|
minimum = 256, |
|
maximum = 1024, |
|
value = default_height, |
|
step = width_and_height_input_slider_steps, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "base_model_num_inference_steps_field_row_id", |
|
visible = default_base_model_num_inference_steps_field_row_visibility |
|
): |
|
|
|
base_model_num_inference_steps_field = gr.Slider( |
|
label = "Steps:", |
|
minimum = 1, |
|
maximum = 100, |
|
value = default_base_model_base_model_num_inference_steps, |
|
step = 1, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "base_model_num_inference_steps_field_for_sdxl_turbo_field_row_id", |
|
visible = default_base_model_num_inference_steps_field_for_sdxl_turbo_field_row_visibility |
|
): |
|
|
|
base_model_num_inference_steps_field_for_sdxl_turbo_field = gr.Slider( |
|
label = "Steps:", |
|
info = "Try using only 1 or a couple of steps.", |
|
minimum = 1, |
|
maximum = 25, |
|
value = default_base_model_base_model_num_inference_steps_for_sdxl_turbo, |
|
step = 1, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "guidance_scale_field_row_id", |
|
visible = default_guidance_scale_field_row_visibility |
|
): |
|
|
|
guidance_scale_field = gr.Slider( |
|
label = "Guidance Scale:", |
|
minimum = 1, |
|
maximum = 15, |
|
value = default_guidance_scale_value, |
|
step = 0.25, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "guidance_scale_for_sdxl_turbo_field_row_id", |
|
visible = default_guidance_scale_for_sdxl_turbo_field_row_visibility |
|
): |
|
|
|
guidance_scale_for_sdxl_turbo_field = gr.HTML( |
|
value = "<div style=\"padding: 10px; text-align: center; background: #fff;\">Guidance scale is not used for SDXL Turbo.</div>" |
|
) |
|
|
|
with gr.Row(): |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if make_seed_selection_a_textbox == 1: |
|
|
|
seed_field = gr.Textbox( |
|
label = "Seed (0 is random):", |
|
value = default_seed_value, |
|
interactive = True, |
|
info = "Maximum: " + str(default_seed_maximum) |
|
) |
|
|
|
else: |
|
|
|
seed_field = gr.Slider( |
|
label = "Seed (0 is random):", |
|
minimum = 0, |
|
maximum = default_seed_maximum, |
|
value = default_seed_value, |
|
step = 1, |
|
interactive = True |
|
) |
|
|
|
with gr.Row( |
|
elem_id = "add_seed_into_pipe_field_row_id", |
|
visible = default_add_seed_into_pipe_field_row_visibility |
|
): |
|
|
|
add_seed_into_pipe_field = gr.Checkbox( |
|
label = "Add seed to generation (to make it deterministic)", |
|
value = default_add_seed_into_pipe_is_selected, |
|
interactive = True, |
|
container = True |
|
) |
|
|
|
|
|
|
|
with gr.Column(scale = 1): |
|
|
|
image_field_visibility = True |
|
image_gallery_field_visibility = False |
|
|
|
if use_image_gallery == 1: |
|
|
|
image_field_visibility = False |
|
image_gallery_field_visibility = True |
|
|
|
with gr.Row( |
|
visible = image_field_visibility |
|
): |
|
|
|
output_image_field = gr.Image( |
|
label = "Generated Image", |
|
type = "pil" |
|
) |
|
|
|
with gr.Row( |
|
visible = image_gallery_field_visibility |
|
): |
|
|
|
show_download_button = True
|
|
|
output_image_gallery_field = gr.Gallery( |
|
elem_id = "image_gallery_id", |
|
label = "Generated Images", |
|
value = [], |
|
selected_index = 0, |
|
allow_preview = "True", |
|
preview = True, |
|
show_download_button = show_download_button |
|
) |
|
|
|
with gr.Row(): |
|
|
|
output_text_field = gr.Textbox( |
|
label = "Prompt Information:", |
|
value = "After an image is generated, its generation information will appear here." + additional_prompt_info_html, |
|
show_copy_button = True, |
|
lines = 10, |
|
max_lines = 20, |
|
container = True |
|
) |
|
|
|
with gr.Row(): |
|
|
|
prompt_truncated_field = gr.HTML( |
|
value = "", |
|
visible = False |
|
) |
|
|
|
if enable_close_command_prompt_button == 1: |
|
|
|
cancel_image_btn = gr.Button( |
|
value = "Close Command Prompt / Cancel", |
|
variant = "stop" |
|
) |
|
|
|
gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again, and then likely refresh the page, to create more images.") |
|
|
|
|
|
|
|
if len(ending_html) > 0: |
|
|
|
with gr.Accordion( |
|
elem_id = "information_section_id", |
|
label = "Information", |
|
open = True |
|
): |
|
|
|
gr.Markdown(ending_html) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
update_refiner_and_upscaler_status_function_js = """ |
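// Updates the refiner/upscaler status message shown beneath the
// options whenever one of the relevant fields changes.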
|
|
|
function updateRefinerAndUpscalerStatus( |
|
baseModelFieldFullNameValue, |
|
refiningSelectionDefaultConfigFieldValue, |
|
refiningSelectionOnlineConfigNormalFieldValue, |
|
refiningSelectionOnlineConfigAutomaticallySelectedFieldValue, |
|
upscalingSelectionFieldValue |
|
) {{ |
|
"use strict"; |
|
|
|
var baseModelNamesObject = {0}; |
|
var modelConfigurationNamesObject = {1}; |
|
var onlineConfigurationsObject = {2}; |
|
var modelConfigurationForceRefinerObject = {3}; |
|
|
|
var refinerOnText = "{4}"; |
|
var refinerOffText = "{5}"; |
|
var upscalerOnText = "{6}"; |
|
var upscalerOffText = "{7}"; |
|
|
|
var baseModelFullNamesToBaseModelIdConversion = {{}}; |
|
Object.keys(baseModelNamesObject).forEach(key => {{ |
|
baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; |
|
}}); |
|
var baseModelFieldValue = ""; |
|
if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ |
|
baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; |
|
}} |
|
|
|
var modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue]; |
|
|
|
var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}}; |
|
Object.keys(modelConfigurationNamesObject).forEach(key => {{ |
|
modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key; |
|
}}); |
|
var modelConfigurationNameValue = ""; |
|
if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{ |
|
modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue]; |
|
}} |
|
|
|
var refinerAndUpscalerInfoMessageHtml = ""; |
|
|
|
if ( |
|
baseModelFieldValue && |
|
modelConfigurationNameValue |
|
) {{ |
|
|
|
var isDefaultConfigState = 1; |
|
|
|
if (onlineConfigurationsObject[modelConfigurationNameValue]) {{ |
|
|
|
isDefaultConfigState = 0; |
|
|
|
}} |
|
|
|
if ( |
|
( |
|
(isDefaultConfigState === 1) && |
|
(refiningSelectionDefaultConfigFieldValue === "Yes") |
|
) || ( |
|
(isDefaultConfigState !== 1) && |
|
( |
|
( |
|
(!Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) && |
|
(refiningSelectionOnlineConfigNormalFieldValue === "Yes") |
|
) || ( |
|
(Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) && |
|
(refiningSelectionOnlineConfigAutomaticallySelectedFieldValue === "Yes") |
|
) |
|
) |
|
) |
|
) {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += refinerOnText; |
|
|
|
}} |
|
else {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += refinerOffText; |
|
|
|
}} |
|
|
|
if (upscalingSelectionFieldValue === "Yes") {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += upscalerOnText; |
|
|
|
}} |
|
else {{ |
|
|
|
refinerAndUpscalerInfoMessageHtml += upscalerOffText; |
|
|
|
}} |
|
|
|
}} |
|
|
|
document.getElementById("refiner_and_upscaler_info_message_div_id").innerHTML = refinerAndUpscalerInfoMessageHtml; |
|
|
|
}} |
|
|
|
""".format( |
|
base_model_names_object, |
|
model_configuration_names_object, |
|
online_configurations_object, |
|
model_configuration_force_refiner_object, |
|
refiner_on_text, |
|
refiner_off_text, |
|
upscaler_on_text, |
|
upscaler_off_text |
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
model_change_function_js = """ |
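// Shows and hides form rows to match the selected base model and model
// configuration (for example, SDXL Turbo hides the negative prompt,
// steps and guidance scale fields).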
|
|
|
function modelChange( |
|
baseModelFieldFullNameValue, |
|
possiblyModelConfigurationFullNameValue |
|
) {{ |
|
"use strict"; |
|
|
|
var baseModelNamesObject = {0}; |
|
var modelConfigurationNamesObject = {1}; |
|
|
|
var baseModelArray = {2}; |
|
|
|
var onlineConfigurationsObject = {3}; |
|
var modelConfigurationForceRefinerObject = {4}; |
|
var modelConfigurationIncludeRefinerNumberOfStepsObject = {5}; |
|
|
|
var allowOnlineConfigurations = {6}; |
|
|
|
var baseModelFullNamesToBaseModelIdConversion = {{}}; |
|
Object.keys(baseModelNamesObject).forEach(key => {{ |
|
baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; |
|
}}); |
|
var baseModelFieldValue = ""; |
|
if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ |
|
baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; |
|
}} |
|
|
|
var modelConfigurationFullNameValue = "" |
|
|
|
var isBaseModelDropdownChange = 0 |
|
|
|
if (baseModelFieldFullNameValue === possiblyModelConfigurationFullNameValue) {{ |
|
|
|
isBaseModelDropdownChange = 1; |
|
|
|
modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue]; |
|
|
|
}} |
|
else {{ |
|
|
|
modelConfigurationFullNameValue = possiblyModelConfigurationFullNameValue; |
|
|
|
window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue] = modelConfigurationFullNameValue; |
|
|
|
}} |
|
|
|
var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}}; |
|
Object.keys(modelConfigurationNamesObject).forEach(key => {{ |
|
modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key; |
|
}}); |
|
var modelConfigurationNameValue = ""; |
|
if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{ |
|
modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue]; |
|
}} |
|
|
|
for (var thisBaseModel of baseModelArray) {{ |
|
|
|
var thisModelConfigurationElementId = "model_configuration_field_" + thisBaseModel + "_row_id"; |
|
|
|
var thisModelConfigurationElementDisplay = "none"; |
|
|
|
if ( |
|
(thisBaseModel === baseModelFieldValue) && |
|
(allowOnlineConfigurations === 1) |
|
) {{ |
|
|
|
thisModelConfigurationElementDisplay = "block"; |
|
|
|
}} |
|
|
|
document.getElementById(thisModelConfigurationElementId).style.display = thisModelConfigurationElementDisplay; |
|
|
|
}} |
|
|
|
|
|
|
if ( |
|
baseModelFieldValue && |
|
modelConfigurationNameValue |
|
) {{ |
|
|
|
var isDefaultConfigState = 1; |
|
|
|
if (onlineConfigurationsObject[modelConfigurationNameValue]) {{ |
|
|
|
isDefaultConfigState = 0; |
|
|
|
}} |
|
|
|
var negativePromptFieldDisplay = "block"; |
|
var negativePromptForSdxlTurboFieldDisplay = "none"; |
|
var baseModelNumInferenceStepsFieldDisplay = "block"; |
|
var baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "none"; |
|
var guidanceScaleFieldDisplay = "block"; |
|
var guidanceScaleForSdxlTurboFieldDisplay = "none"; |
|
|
|
if (baseModelFieldValue === "sdxl_turbo") {{ |
|
|
|
negativePromptFieldDisplay = "none"; |
|
negativePromptForSdxlTurboFieldDisplay = "block"; |
|
baseModelNumInferenceStepsFieldDisplay = "none"; |
|
baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "block"; |
|
guidanceScaleFieldDisplay = "none"; |
|
guidanceScaleForSdxlTurboFieldDisplay = "block"; |
|
|
|
}} |
|
|
|
document.getElementById("negative_prompt_field_row_id").style.display = negativePromptFieldDisplay; |
|
document.getElementById("negative_prompt_for_sdxl_turbo_field_row_id").style.display = negativePromptForSdxlTurboFieldDisplay; |
|
document.getElementById("base_model_num_inference_steps_field_row_id").style.display = baseModelNumInferenceStepsFieldDisplay; |
|
document.getElementById("base_model_num_inference_steps_field_for_sdxl_turbo_field_row_id").style.display = baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay; |
|
document.getElementById("guidance_scale_field_row_id").style.display = guidanceScaleFieldDisplay; |
|
document.getElementById("guidance_scale_for_sdxl_turbo_field_row_id").style.display = guidanceScaleForSdxlTurboFieldDisplay; |
|
|
|
var refinerDefaultConfigAccordionDisplay = "none"; |
|
var refinerOnlineConfigAccordionDisplay = "block"; |
|
|
|
var addSeedIntoPipeFieldDisplay = "none"; |
|
|
|
if (isDefaultConfigState === 1) {{ |
|
|
|
refinerDefaultConfigAccordionDisplay = "block"; |
|
refinerOnlineConfigAccordionDisplay = "none"; |
|
|
|
addSeedIntoPipeFieldDisplay = "block"; |
|
|
|
}} |
|
|
|
document.getElementById("refiner_default_config_accordion_id").style.display = refinerDefaultConfigAccordionDisplay; |
|
document.getElementById("refiner_online_config_accordion_id").style.display = refinerOnlineConfigAccordionDisplay; |
|
|
|
document.getElementById("add_seed_into_pipe_field_row_id").style.display = addSeedIntoPipeFieldDisplay; |
|
|
|
var refiningSelectionAutomaticallySelectedMessageFieldDisplay = "none"; |
|
|
|
var refiningSelectionOnlineConfigNormalFieldDisplay = "block"; |
|
var refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay = "none"; |
|
|
|
if (Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) {{ |
|
|
|
refiningSelectionAutomaticallySelectedMessageFieldDisplay = "block"; |
|
|
|
refiningSelectionOnlineConfigNormalFieldDisplay = "none"; |
|
refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay = "block"; |
|
|
|
}} |
|
|
|
var refiningNumberOfIterationsForOnlineConfigFieldDisplay = "none"; |
|
|
|
if (Object.keys(modelConfigurationIncludeRefinerNumberOfStepsObject).includes(modelConfigurationNameValue)) {{ |
|
|
|
refiningNumberOfIterationsForOnlineConfigFieldDisplay = "block"; |
|
|
|
}} |
|
|
|
document.getElementById("refining_selection_automatically_selected_message_field_id").style.display = refiningSelectionAutomaticallySelectedMessageFieldDisplay; |
|
document.getElementById("refining_selection_online_config_normal_field_id").style.display = refiningSelectionOnlineConfigNormalFieldDisplay; |
|
document.getElementById("refining_selection_online_config_automatically_selected_field_id").style.display = refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay; |
|
document.getElementById("refining_number_of_iterations_for_online_config_field_row_id").style.display = refiningNumberOfIterationsForOnlineConfigFieldDisplay; |
|
|
|
}} |
|
|
|
}} |
|
|
|
""".format( |
|
base_model_names_object, |
|
model_configuration_names_object, |
|
base_model_array, |
|
online_configurations_object, |
|
model_configuration_force_refiner_object, |
|
model_configuration_include_refiner_number_of_steps_object, |
|
allow_online_configurations |
|
) |
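# The base model field is passed as both JavaScript arguments below so
# the handler can tell a base model change (both values match) apart
# from a configuration dropdown change.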
|
|
|
|
|
|
|
base_model_field.change( |
|
fn = None, |
|
inputs = [

base_model_field,

base_model_field

],
|
outputs = None, |
|
js = model_change_function_js |
|
) |
|
|
|
|
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
this_model_configuration_dropdown_field.change( |
|
fn = None, |
|
inputs = [ |
|
base_model_field, |
|
this_model_configuration_dropdown_field |
|
], |
|
outputs = None, |
|
js = model_change_function_js |
|
) |
|
|
|
|
|
|
|
output_image_gallery_field.select( |
|
fn = update_prompt_info_from_gallery, |
|
inputs = [ |
|
prompt_information_array_state |
|
], |
|
outputs = [ |
|
output_image_gallery_field, |
|
output_text_field |
|
], |
|
show_progress = "hidden" |
|
) |
|
|
|
|
|
|
|
if ( |
|
(enable_refiner == 1) or |
|
(enable_upscaler == 1) |
|
): |
|
|
|
triggers_array = [] |
|
|
|
if enable_refiner == 1: |
|
|
|
triggers_array.extend([ |
|
base_model_field.change, |
|
refining_selection_default_config_field.change, |
|
refining_selection_online_config_normal_field.change, |
|
refining_selection_online_config_automatically_selected_field.change |
|
]) |
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
triggers_array.extend([ |
|
this_model_configuration_dropdown_field.change |
|
]) |
|
|
|
if enable_upscaler == 1: |
|
|
|
triggers_array.extend([ |
|
upscaling_selection_field.change |
|
]) |
|
|
|
gr.on( |
|
triggers = triggers_array, |
|
fn = None, |
|
inputs = [ |
|
base_model_field, |
|
refining_selection_default_config_field, |
|
refining_selection_online_config_normal_field, |
|
refining_selection_online_config_automatically_selected_field, |
|
upscaling_selection_field |
|
], |
|
outputs = None, |
|
show_progress = "hidden", |
|
queue = False, |
|
js = update_refiner_and_upscaler_status_function_js |
|
) |
|
|
|
|
|
|
|
create_image_function_inputs = [ |
|
|
|
base_model_field, |
|
|
|
prompt_field, |
|
negative_prompt_field, |
|
|
|
scheduler_field, |
|
|
|
image_width_field, |
|
image_height_field, |
|
guidance_scale_field, |
|
base_model_num_inference_steps_field, |
|
base_model_num_inference_steps_field_for_sdxl_turbo_field, |
|
seed_field, |
|
add_seed_into_pipe_field, |
|
|
|
refining_selection_default_config_field, |
|
refining_selection_online_config_normal_field, |
|
refining_selection_online_config_automatically_selected_field, |
|
|
|
refining_denoise_start_for_default_config_field, |
|
refining_use_denoising_start_in_base_model_when_using_refiner_field, |
|
refining_base_model_output_to_refiner_is_in_latent_space_field, |
|
|
|
refining_denoise_start_for_online_config_field, |
|
refining_number_of_iterations_for_online_config_field, |
|
|
|
upscaling_selection_field, |
|
upscaling_num_inference_steps_field, |
|
|
|
image_gallery_array_state, |
|
prompt_information_array_state, |
|
last_model_configuration_name_selected_state, |
|
last_refiner_name_selected_state, |
|
last_upscaler_name_selected_state, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state |
|
|
|
] |
|
|
|
for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: |
|
|
|
create_image_function_inputs.append( |
|
this_model_configuration_dropdown_field |
|
) |
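# The generate button runs a three-step chain: a pre-generation step,
# the image creation itself, then a post-generation step that updates
# the button and text field.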
|
|
|
|
|
|
|
generate_image_btn_click_event = generate_image_btn.click( |
|
fn = before_create_image_function, |
|
inputs = None, |
|
outputs = [ |
|
generate_image_btn, |
|
output_image_field, |
|
output_image_gallery_field, |
|
output_text_field |
|
], |
|
show_progress = "hidden", |
|
queue = True |
|
).then( |
|
fn = create_image_function, |
|
inputs = create_image_function_inputs, |
|
outputs = [ |
|
output_image_field, |
|
output_image_gallery_field, |
|
output_text_field, |
|
prompt_truncated_field, |
|
last_model_configuration_name_selected_state, |
|
last_refiner_name_selected_state, |
|
last_upscaler_name_selected_state, |
|
stored_pipe_state, |
|
stored_refiner_state, |
|
stored_upscaler_state |
|
], |
|
show_progress = "full", |
|
queue = True |
|
).then( |
|
fn = after_create_image_function, |
|
inputs = None, |
|
outputs = [ |
|
generate_image_btn, |
|
output_text_field |
|
], |
|
show_progress = "hidden", |
|
queue = True |
|
) |
|
|
|
|
|
|
|
if enable_close_command_prompt_button == 1: |
|
|
|
|
|
|
|
cancel_image_btn.click( |
|
fn = cancel_image_processing, |
|
inputs = None, |
|
outputs = None, |
|
cancels = [generate_image_btn_click_event], |
|
queue = True |
|
) |
|
|
|
|
|
|
|
|
|
|
|
model_configuration_dropdown_field_values_for_js = model_configuration_dropdown_field_values_for_js[:-1] |
|
|
|
|
|
|
|
script_on_load_js = """ |
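// Stores each base model's currently selected configuration dropdown
// value on the window object so the JavaScript handlers above can read
// and update it.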
|
|
|
function scriptOnLoad() {{ |
|
"use strict"; |
|
|
|
window.modelConfigurationDropdownFieldValuesObject = {{{0}}}; |
|
|
|
}} |
|
|
|
""".format( |
|
model_configuration_dropdown_field_values_for_js |
|
) |
|
|
|
|
|
|
|
sd_interface_load_kwargs = { |
|
"scroll_to_output": False, |
|
"show_progress": "full" |
|
} |
|
|
|
sd_interface_continuous = sd_interface.load( |
|
fn = None, |
|
inputs = None, |
|
outputs = None, |
|
js = script_on_load_js, |
|
**sd_interface_load_kwargs |
|
) |
|
|
|
|
|
|
|
sd_interface.queue( |
|
max_size = max_queue_size |
|
) |
|
|
|
|
|
|
|
inbrowser = False |
|
|
|
if auto_open_browser == 1: |
|
|
|
inbrowser = True |
|
|
|
sd_interface.launch( |
|
inbrowser = inbrowser, |
|
share = None, |
|
show_api = False, |
|
quiet = True, |
|
show_error = True, |
|
state_session_capacity = 10000, |
|
max_threads = 40 |
|
) |
|
|