diff --git "a/app.py" "b/app.py"
--- "a/app.py"
+++ "b/app.py"
@@ -1,10 +1,11 @@
import gradio as gr
import torch
-#import numpy as np
import modin.pandas as pd
from PIL import Image
from diffusers import DiffusionPipeline
import os
+import random
+import torchsde
##########
@@ -17,7 +18,6 @@ import os
##########
-
# Tested with gradio version 4.8.0
# https://www.gradio.app/main/docs/interface
@@ -115,6 +115,19 @@ default_base_model = "sdxl"
####################
+#
+# Use Safety Checker
+#
+# This can block some NSFW images for the models that allow it.
+#
+# 0 No
+# 1 Yes
+#
+
+use_safety_checker = 0
+
+#####
+
#
# Auto Save Imagery
#
@@ -144,37 +157,17 @@ saved_images_folder_name = "saved_images"
# Auto Open Browser From Command Prompt
#
-auto_open_browser = 1
-
-####################
-
-#
-# Make Seed Selection A Textbox
-#
-# If you use a slider or number field for the seed, some seeds can't be
-# duplicated using those fields. If you enter a number greater than
-# 9007199254740992, the seed won't reliably be used. Check the text
-# details to see if it was used. This is a technical limitation as of
-# writing this. See the bug report here:
-# https://github.com/gradio-app/gradio/issues/5354
-#
-# Using the slider, and not entering a number, might be the way to get
-# reliable numbers above that number. Just don't then use the up and down
-# arrows in the field to go up or down a number.
-#
-# The way to use seeds higher than that reliably is to set this variable
-# to 1.
-
-make_seed_selection_a_textbox = 0
+auto_open_browser = 0
####################
#
# Include Close Command Prompt / Cancel Button
#
-# This doesn't work well at all. It just closes the command prompt. And
-# it currently isn't canceling image creation either when used. Don't use
-# it.
+# This doesn't work well at all. It just closes the command prompt. I
+# might remove this eventually unless someone knows of a way it can work
+# better, such as stopping the generation without closing the command
+# prompt.
#
enable_close_command_prompt_button = 0
@@ -190,7 +183,7 @@ enable_close_command_prompt_button = 0
# 75 steps. The refiner will then run for 25 steps.
#
-default_use_denoising_start_in_base_model_when_using_refiner = 0
+default_use_denoising_start_in_base_model_when_using_refiner = 1
####################
@@ -219,16 +212,8 @@ log_generation_times = 1
#
# Use Image Gallery
#
-# There is a bug in downloading images:
-# https://github.com/gradio-app/gradio/issues/6486
-# It saves as HTML rather than image.
-#
-# If you use the gallery, you can turn the download button off for now.
-# They can still right click and save the image.
-#
-use_image_gallery = 0
-show_download_button_for_gallery = 0
+use_image_gallery = 1
####################
@@ -262,6 +247,50 @@ show_messages_in_modal_on_page = 0
####################
+#
+# Suppress Hugging Face Hub Offline Status
+#
+# By default, we add messages about the current setting of
+# "HF_HUB_OFFLINE" in the command prompt.
+#
+
+suppress_hugging_face_hub_offline_status = 0
+
+####################
+
+#
+# Add Seed Into Pipe
+#
+# To make generation deterministic. I add the option because the online
+# configuration for the PhotoReal site doesn't do that and it changes
+# things.
+#
+
+default_add_seed_into_pipe = 1
+
+####################
+
+#
+# Max Queue Size
+#
+
+max_queue_size_if_cpu = 1
+max_queue_size_if_torch = 20
+
+####################
+
+#
+# Allow Online Configurations
+#
+# This allows matching what was created on these sites:
+# https://huggingface.co/spaces/Manjushri/SDXL-1.0
+# https://huggingface.co/spaces/Manjushri/PhotoReal-V3.7.5
+#
+
+allow_online_configurations = 0
+
+####################
+
#
# Up Next Is Various Configuration Arrays and Objects
#
@@ -276,7 +305,7 @@ base_model_array = [
]
base_model_names_object = {
- "sdxl": "Stable Diffusion XL 1.0",
+ "sdxl": "Stable Diffusion XL",
"photoreal": "PhotoReal",
"sdxl_turbo": "Stable Diffusion XL Turbo",
"sd_1_5_runwayml": "Stable Diffusion 1.5"
@@ -306,6 +335,26 @@ base_model_names_object = {
#
# Seeds do not match the online PhotoReal version.
#
+# "photoreal_3-8-1"
+#
+# - My customized configurations. (subject to change)
+# "circulus/canvers-real-v3.8.1"
+#
+# "photoreal_3-8"
+#
+# - My customized configurations. (subject to change)
+# "circulus/canvers-real-v3.8"
+#
+# "photoreal_3-7-5"
+#
+# - My customized configurations. (subject to change)
+# "circulus/canvers-real-v3.7.5"
+#
+# "photoreal_3-6"
+#
+# - My customized configurations. (subject to change)
+# "circulus/canvers-realistic-v3.6"
+#
# "photoreal_2023-11-12"
#
# - Valid from November 12th to present.
@@ -334,6 +383,10 @@ base_model_object_of_model_configuration_arrays = {
],
"photoreal": [
"photoreal_default",
+ "photoreal_3-8-1",
+ "photoreal_3-8",
+ "photoreal_3-7-5",
+ "photoreal_3-6",
"photoreal_2023-11-12",
"photoreal_2023-09-01"
],
@@ -348,21 +401,29 @@ base_model_object_of_model_configuration_arrays = {
####################
model_configuration_names_object = {
- "sdxl_default": "1.0 - Default (subject to change)",
+ "sdxl_default": "1.0 - Default",
"sdxl_2023-11-12": "1.0 (2023-11-12 online config)",
"sdxl_2023-09-05": "1.0 (2023-09-05 online config)",
- "photoreal_default": "3.7.5 - Default (subject to change)",
+ "photoreal_default": "3.6 - Default",
+ "photoreal_3-8-1": "3.8.1 - Default",
+ "photoreal_3-8": "3.8 - Default",
+ "photoreal_3-7-5": "3.7.5 - Default",
+ "photoreal_3-6": "3.6 - Default",
"photoreal_2023-11-12": "3.7.5 (2023-11-12 online config)",
"photoreal_2023-09-01": "3.6 (2023-09-01 online config)",
- "sdxl_turbo_default": "Default (subject to change)",
- "sd_1_5_runwayml_default": "1.5 - Default (subject to change)"
+ "sdxl_turbo_default": "Default",
+ "sd_1_5_runwayml_default": "1.5 - Default"
}
model_configuration_links_object = {
"sdxl_default": "stabilityai/stable-diffusion-xl-base-1.0",
"sdxl_2023-11-12": "stabilityai/stable-diffusion-xl-base-1.0",
"sdxl_2023-09-05": "stabilityai/stable-diffusion-xl-base-1.0",
- "photoreal_default": "circulus/canvers-real-v3.7.5",
+ "photoreal_default": "circulus/canvers-realistic-v3.6",
+ "photoreal_3-8-1": "circulus/canvers-real-v3.8.1",
+ "photoreal_3-8": "circulus/canvers-real-v3.8",
+ "photoreal_3-7-5": "circulus/canvers-real-v3.7.5",
+ "photoreal_3-6": "circulus/canvers-realistic-v3.6",
"photoreal_2023-11-12": "circulus/canvers-real-v3.7.5",
"photoreal_2023-09-01": "circulus/canvers-realistic-v3.6",
"sdxl_turbo_default": "stabilityai/sdxl-turbo",
@@ -384,6 +445,13 @@ model_configuration_include_refiner_number_of_steps_object = model_configuration
# "sdxl_2023-09-05": 1
#}
+online_configurations_object = {
+ "sdxl_2023-11-12": 1,
+ "sdxl_2023-09-05": 1,
+ "photoreal_2023-11-12": 1,
+ "photoreal_2023-09-01": 1
+}
+
####################
hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0"
@@ -423,6 +491,116 @@ base_model_model_configuration_defaults_object = {
# https://huggingface.co/runwayml/stable-diffusion-v1-5
#
+####################
+
+
+
+default_scheduler = "model_default"
+
+
+
+schedulers_array = [
+ "model_default",
+ "ddim",
+ "ddpm",
+ "dpm_solver_multistep",
+ "dpm_solver_multistep_karras_sigmas_true",
+ "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp",
+ "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp",
+ "dpm_solver_singlestep",
+ "dpm_solver_singlestep_karras_sigmas_true",
+ "kdpm2_discrete",
+ "kdpm2_discrete_karras_sigmas_true",
+ "kdpm2_ancestral_discrete",
+ "kdpm2_ancestral_discrete_karras_sigmas_true",
+ "euler_discrete",
+ "euler_ancestral_discrete",
+ "heun_discrete",
+ "lms_discrete",
+ "lms_discrete_karras_sigmas_true",
+ "pndm",
+ "pndm_skip_prk_steps_true",
+ "deis_multistep",
+ "dpm_solver_sde",
+ "uni_pc_multistep"
+]
+
+
+
+scheduler_long_names_object = {
+ "model_default": "Model Default",
+ "ddim": "DDIM",
+ "ddpm": "DDPM",
+ "dpm_solver_multistep": "DPM++ 2M (DPMSolverMultistep)",
+ "dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras (DPMSolverMultistep with use_karras_sigmas=True)",
+ "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE (DPMSolverMultistep with algorithm_type=\"sde-dpmsolver++\")",
+ "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras (DPMSolverMultistep with use_karras_sigmas=True & algorithm_type=\"sde-dpmsolver++\")",
+ "dpm_solver_singlestep": "DPM++ SDE (DPMSolverSinglestep)",
+ "dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras (DPMSolverSinglestep with use_karras_sigmas=True)",
+ "kdpm2_discrete": "DPM2 (KDPM2Discrete)",
+ "kdpm2_discrete_karras_sigmas_true": "DPM2 Karras (KDPM2Discrete with use_karras_sigmas=True)",
+ "kdpm2_ancestral_discrete": "DPM2 a (KDPM2AncestralDiscrete)",
+ "kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras (KDPM2AncestralDiscrete with use_karras_sigmas=True)",
+ "euler_discrete": "Euler (EulerDiscrete)",
+ "euler_ancestral_discrete": "Euler a (EulerAncestralDiscrete)",
+ "heun_discrete": "Heun (HeunDiscrete)",
+ "lms_discrete": "LMS (LMSDiscrete)",
+ "lms_discrete_karras_sigmas_true": "LMS Karras (LMSDiscrete with use_karras_sigmas=True)",
+ "pndm": "PNDM",
+ "pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS",
+ "deis_multistep": "DEISMultistep",
+ "dpm_solver_sde": "DPMSolverSDE",
+ "uni_pc_multistep": "UniPCMultistep"
+}
+
+
+
+scheduler_short_names_object = {
+ "ddim": "DDIM",
+ "ddpm": "DDPM",
+ "dpm_solver_multistep": "DPM++ 2M",
+ "dpm_solver_multistep_karras_sigmas_true": "DPM++ 2M Karras",
+ "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE",
+ "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp": "DPM++ 2M SDE Karras",
+ "dpm_solver_singlestep": "DPM++ SDE",
+ "dpm_solver_singlestep_karras_sigmas_true": "DPM++ SDE Karras",
+ "kdpm2_discrete": "DPM2",
+ "kdpm2_discrete_karras_sigmas_true": "DPM2 Karras",
+ "kdpm2_ancestral_discrete": "DPM2 a",
+ "kdpm2_ancestral_discrete_karras_sigmas_true": "DPM2 a Karras",
+ "euler_discrete": "Euler",
+ "euler_ancestral_discrete": "Euler a",
+ "heun_discrete": "Heun",
+ "lms_discrete": "LMS",
+ "lms_discrete_karras_sigmas_true": "LMS Karras",
+ "pndm": "PNDM",
+ "pndm_skip_prk_steps_true": "PNDM (with skip_prk_steps=True) - Close to PLMS",
+ "deis_multistep": "DEISMultistep",
+ "dpm_solver_sde": "DPMSolverSDE",
+ "uni_pc_multistep": "UniPCMultistep"
+}
+
+
+
+scheduler_name_to_identifier_in_app_object = {
+ "DDIMScheduler": "ddim",
+ "DDPMScheduler": "ddpm",
+ "DPMSolverMultistepScheduler": "dpm_solver_multistep",
+ "DPMSolverSinglestepScheduler": "dpm_solver_singlestep",
+ "KDPM2DiscreteScheduler": "kdpm2_discrete",
+ "KDPM2AncestralDiscreteScheduler": "kdpm2_ancestral_discrete",
+ "EulerDiscreteScheduler": "euler_discrete",
+ "EulerAncestralDiscreteScheduler": "euler_ancestral_discrete",
+ "HeunDiscreteScheduler": "heun_discrete",
+ "LMSDiscreteScheduler": "lms_discrete",
+ "PNDMScheduler": "pndm",
+ "DEISMultistepScheduler": "deis_multistep",
+ "DPMSolverSDEScheduler": "dpm_solver_sde",
+ "UniPCMultistepScheduler": "uni_pc_multistep"
+}
+
+
+
####################
#
@@ -457,12 +635,13 @@ default_negative_prompt = ""
default_width = 768
default_height = 768
-default_guidance_scale_value = 7
+default_guidance_scale_value = 7.5
+
+default_seed_value = 0
default_base_model_base_model_num_inference_steps = 50
default_base_model_base_model_num_inference_steps_for_sdxl_turbo = 2
-#default_seed_maximum = 999999999999999999
default_seed_maximum = 1000000000000000000
default_seed_value = 876678173805928800
@@ -485,6 +664,11 @@ default_upscaler_selected = 0
default_refiner_accordion_open = 1
default_upscaler_accordion_open = 1
+# Upscaler Options
+
+maximum_upscaler_steps = 150
+default_upscaler_steps = 50
+
# xFormers:
#
# https://huggingface.co/docs/diffusers/optimization/xformers
@@ -519,19 +703,23 @@ if default_base_model == "photoreal":
# PhotoReal
- default_seed_value = 3648905360627576
+# default_guidance_scale_value = 7
+
+ #default_seed_value = 3648905360627576
+ default_seed_value = 0
elif default_base_model == "sdxl_turbo":
# SDXL Turbo
- default_seed_value = 2725116121543
+ #default_seed_value = 2725116121543
+ default_seed_value = 0
-#elif default_base_model == "sd_1_5_runwayml":
+elif default_base_model == "sd_1_5_runwayml":
# SD 1.5
-
+ default_seed_value = 0
else:
@@ -539,7 +727,12 @@ else:
default_width = 1024
default_height = 1024
- default_guidance_scale_value = 10
+
+# default_guidance_scale_value = 10
+ default_guidance_scale_value = 7.5
+
+ #default_seed_value = 876678173805928800
+ default_seed_value = 0
@@ -550,24 +743,31 @@ width_and_height_input_slider_steps = 8
opening_html = ""
+ending_html = ""
+
+max_queue_size = max_queue_size_if_torch
if device == "cpu":
opening_html = "THIS APP IS EXCEPTIONALLY SLOW!
This app is not running on a GPU. The first time it loads after the space is rebuilt it might take 10 minutes to generate a SDXL Turbo image. It may take 2 to 3 minutes after that point to do two steps. For other models, it may take hours to create a single image."
+ max_queue_size = max_queue_size_if_cpu
-ending_html = """This app allows you to try to match images that can be generated using several tools online. (Stable Diffusion XL, PhotoReal with SDXL 1.0 Refiner and SDXL Turbo Unofficial Demo) You can select the base model you want to use in the first dropdown option. The second configuration option involves choosing which version and/or configuration to choose. Certain configurations try to match the version online, taking into account changes that were made over time. Another configuration involves a default configuration I choose and is subject to change while I am still designing this app.
-Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions. If you have a seed greater than 9007199254740992, it may not be processed correctly. Make sure the prompt matches the seed you entered. (shown in the \"Prompt Information\" section once you create an image) If it doesn't, set \"make_seed_selection_a_textbox\" to 1 in the script. This bug is described here.
+if allow_online_configurations == 1:
+
+ ending_html = """This app allows you to try to match images that can be generated using several tools online. (Stable Diffusion XL, PhotoReal with SDXL 1.0 Refiner and SDXL Turbo Unofficial Demo) You can select the base model you want to use in the first dropdown option. The second configuration option involves choosing which version and/or configuration to choose. Certain configurations try to match the version online, taking into account changes that were made over time. Another configuration involves a default configuration I choose and is subject to change while I am still designing this app.
+
+"""
-The original script for this app was written by Manjushri."""
+ending_html += """Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions.
+
+The original script for this app was written by Manjushri."""
-refiner_and_upscaler_status_opening_html = "
"
-refiner_and_upscaler_status_closing_html = "
"
refiner_on_text = "Refiner is on. "
refiner_off_text = "Refiner is off. "
@@ -608,15 +808,53 @@ if script_being_run_on_hugging_face == 1:
show_messages_in_modal_on_page = 0
show_messages_in_command_prompt = 1
+ allow_online_configurations = 0
+
if device == "cpu":
show_image_creation_progress_log = 1
+ default_width = 768
+ default_height = 768
+
ending_html = """
If you would like to download this app to run offline on a Windows computer that has a NVIDIA graphics card, click here to download it.
""" + ending_html
+ # If on HuggingFace, I reduce what is available.
+
+
+
+default_width = 768
+default_height = 768
+
+if allow_online_configurations == 0:
+
+ base_model_array = [
+ "sdxl",
+ "photoreal",
+ "sdxl_turbo"
+ ]
+
+ base_model_object_of_model_configuration_arrays = {
+ "sdxl": [
+ "sdxl_default"
+ ],
+ "photoreal": [
+ "photoreal_default"
+ ],
+ "sdxl_turbo": [
+ "sdxl_turbo_default"
+ ]
+ }
+
+ base_model_model_configuration_defaults_object = {
+ "sdxl": "sdxl_default",
+ "photoreal": "photoreal_default",
+ "sdxl_turbo": "sdxl_turbo_default"
+ }
+
hugging_face_hub_is_offline = 0
@@ -630,9 +868,15 @@ if script_being_run_on_hugging_face == 0:
hugging_face_hub_is_offline = 1
- if hugging_face_hub_is_offline == 0:
+	if suppress_hugging_face_hub_offline_status == 0:
+
+ if hugging_face_hub_is_offline == 0:
- print ("Note: The Hugging Face cache directory does not automatically delete older data. Over time, it could eventually grow to use all the space on the drive it is on. You either need to manually clean out the folder occasionally or see Instructons.txt on how to not automatically update data once you have downloaded everything you need.")
+			print ("Note: The Hugging Face cache directory does not automatically delete older data. Over time, it could eventually grow to use all the space on the drive it is on. You either need to manually clean out the folder occasionally or see Instructions.txt on how to not automatically update data once you have downloaded everything you need.")
+
+ else:
+
+ print ("You are working offline. Data will not be downloaded. See \"ai_image_creation.bat\" or \"Instructions.txt\" for more info.")
@@ -664,7 +908,10 @@ if auto_save_imagery == 1:
-if log_generation_times == 1:
+if (
+ (log_generation_times == 1) or
+ (show_image_creation_progress_log == 1)
+):
import time
@@ -719,17 +966,6 @@ def error_function(
-# Don't change this one
-
-default_model_configuration_object = {
- "sdxl_default": 1,
- "photoreal_default": 1,
- "sdxl_turbo_default": 1,
- "sd_1_5_runwayml_default": 1
-}
-
-
-
additional_prompt_info_html = ""
if auto_save_imagery == 1:
@@ -788,6 +1024,17 @@ default_model_configuration_nicely_named_value = model_configuration_names_objec
+if not (
+ default_scheduler and
+ default_scheduler in scheduler_long_names_object
+):
+
+ error_function("A default scheduler must be properly configured in the code.")
+
+default_scheduler_nicely_named_value = scheduler_long_names_object[default_scheduler]
+
+
+
if enable_refiner != 1:
default_refiner_selected = 0
@@ -821,13 +1068,13 @@ if default_upscaler_selected == 1:
default_upscale_option = "Yes"
-is_default_config = 0
+is_default_config = 1
-if default_model_configuration in default_model_configuration_object:
+if default_model_configuration in online_configurations_object:
- is_default_config = 1
+ is_default_config = 0
-default_refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html
+default_refiner_and_upscaler_status_text = ""
@@ -930,49 +1177,37 @@ if enable_upscaler == 1:
-default_refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html
-
-
-
-image_gallery_array = []
-prompt_information_array = []
-
-
-
-default_negative_prompt_field_visibility = True
-default_negative_prompt_for_sdxl_turbo_field_visibility = False
-default_base_model_num_inference_steps_field_visibility = True
-default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = False
-default_guidance_scale_field_visibility = True
-default_guidance_scale_for_sdxl_turbo_field_visibility = False
+default_negative_prompt_field_row_visibility = True
+default_negative_prompt_for_sdxl_turbo_field_row_visibility = False
+default_base_model_num_inference_steps_field_row_visibility = True
+default_base_model_num_inference_steps_field_for_sdxl_turbo_field_row_visibility = False
+default_guidance_scale_field_row_visibility = True
+default_guidance_scale_for_sdxl_turbo_field_row_visibility = False
if default_base_model == "sdxl_turbo":
- default_negative_prompt_field_visibility = False
- default_negative_prompt_for_sdxl_turbo_field_visibility = True
- default_base_model_num_inference_steps_field_visibility = False
- default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility = True
- default_guidance_scale_field_visibility = False
- default_guidance_scale_for_sdxl_turbo_field_visibility = True
+ default_negative_prompt_field_row_visibility = False
+ default_negative_prompt_for_sdxl_turbo_field_row_visibility = True
+ default_base_model_num_inference_steps_field_row_visibility = False
+ default_base_model_num_inference_steps_field_for_sdxl_turbo_field_row_visibility = True
+ default_guidance_scale_field_row_visibility = False
+ default_guidance_scale_for_sdxl_turbo_field_row_visibility = True
-last_model_configuration_name_value = ""
-last_refiner_selected = ""
-last_upscaler_selected = ""
+default_add_seed_into_pipe_field_row_visibility = False
+if is_default_config == 1:
+ default_add_seed_into_pipe_field_row_visibility = True
-if show_image_creation_progress_log == 1:
- import time
+default_add_seed_into_pipe_is_selected = False
+if default_add_seed_into_pipe == 1:
-current_progress_text = ""
-current_actual_total_base_model_steps = ""
-current_actual_total_refiner_steps = ""
-current_actual_total_upscaler_steps = ""
+ default_add_seed_into_pipe_is_selected = True
@@ -992,6 +1227,24 @@ for this_base_model in base_model_array:
+default_scheduler_choices_array = []
+
+for this_scheduler in schedulers_array:
+
+ default_scheduler_choices_array.append(
+ scheduler_long_names_object[this_scheduler]
+ )
+
+
+
+make_seed_selection_a_textbox = 1
+
+if default_seed_maximum <= 9007199254740992:
+
+ make_seed_selection_a_textbox = 0
+
+
+
###############################################################################
###############################################################################
#
@@ -1132,6 +1385,7 @@ def numerical_bool(
#####################
def truncate_prompt (
+ pipe,
existing_prompt_text
):
@@ -1187,29 +1441,90 @@ def truncate_prompt (
#####################
#
-# Update Prompt Info From Gallery
+# Construct Pipe
#
-# If you select an image in the image gallery, display the prompt
-# information for that image.
+# Prepare the base model.
#
#####################
-def update_prompt_info_from_gallery (
- gallery_data: gr.SelectData
+def construct_pipe (
+ base_model_name_value,
+ model_configuration_name_value
):
- gallery_data_index = gallery_data.index
+ if device == "cuda":
+ torch.cuda.empty_cache()
- output_image_field_update = gr.Gallery(
- selected_index = gallery_data_index
+ base_model_kwargs = {}
+
+ if (
+ (base_model_name_value == "sdxl") or
+ (base_model_name_value == "photoreal") or
+ (base_model_name_value == "sdxl_turbo") or
+ (base_model_name_value == "sd_1_5_runwayml")
+ ):
+
+ base_model_kwargs["use_safetensors"] = True
+
+ if use_safety_checker == 0:
+
+ if (
+ (base_model_name_value == "photoreal") or
+ (base_model_name_value == "sd_1_5_runwayml")
+ ):
+
+ base_model_kwargs = {
+ "safety_checker": None,
+ "requires_safety_checker": False
+ }
+
+ if device == "cuda":
+
+ if (
+ (base_model_name_value == "sdxl") or
+ (base_model_name_value == "sdxl_turbo") or
+ (base_model_name_value == "sd_1_5_runwayml")
+ ):
+
+ base_model_kwargs["variant"] = "fp16"
+
+ base_model_kwargs["torch_dtype"] = torch.float16
+
+ if use_custom_hugging_face_cache_dir == 1:
+
+ base_model_kwargs["cache_dir"] = hugging_face_cache_dir
+
+ pipe = DiffusionPipeline.from_pretrained(
+ model_configuration_links_object[model_configuration_name_value],
+ **base_model_kwargs
)
- output_text_field_update = prompt_information_array[gallery_data_index]
+ if use_model_cpu_offload_for_base_model == 1:
+ pipe.enable_model_cpu_offload()
- return {
- output_image_field: output_image_field_update,
- output_text_field: output_text_field_update
- }
+ if use_xformers == 1:
+ pipe.enable_xformers_memory_efficient_attention()
+
+ pipe = pipe.to(device)
+
+ if use_sequential_cpu_offload_for_base_model == 1:
+ pipe.enable_sequential_cpu_offload()
+
+ if use_default_attn_processor == 1:
+ pipe.unet.set_default_attn_processor()
+
+ if device == "cuda":
+ torch.cuda.empty_cache()
+ else:
+ pipe.unet = torch.compile(
+ pipe.unet,
+ mode = "reduce-overhead",
+ fullgraph = True
+ )
+
+ return (
+ pipe
+ )
@@ -1219,290 +1534,245 @@ def update_prompt_info_from_gallery (
#####################
#
-# Callback Function for Base Model Progress
-#
-# Add the current step the generation is on in the base model to the web
-# interface.
+# Configure Scheduler
#
#####################
-def callback_function_for_base_model_progress(
- callback_pipe,
- callback_step_index,
- callback_timestep,
- callback_kwargs
+def configure_scheduler (
+ pipe,
+ scheduler_value
):
- global current_progress_text
+ scheduler_config = pipe.scheduler.config
- global current_base_model_generation_start_time
+ scheduler = scheduler_value
- current_progress_text = "Base model steps complete... " + str(callback_step_index) + " of " + str(current_actual_total_base_model_steps)
- if int(callback_step_index) == 0:
- current_base_model_generation_start_time = time.time()
+ if scheduler_value == "model_default":
- if int(callback_step_index) > 0:
+ scheduler_name = pipe.scheduler.config._class_name
- seconds_per_step = ((time.time() - current_base_model_generation_start_time) / int(callback_step_index))
+ if scheduler_name in scheduler_name_to_identifier_in_app_object:
- (
- time_per_step_hours,
- time_per_step_minutes,
- time_per_step_seconds
- ) = convert_seconds(seconds_per_step)
+ scheduler = scheduler_name_to_identifier_in_app_object[scheduler_name]
- if time_per_step_hours > 0:
- hours_text = "hr"
- if time_per_step_hours > 1:
+ scheduler_used = scheduler
- hours_text = "hrs"
- nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
- elif time_per_step_minutes > 0:
+ if scheduler == "ddim":
- nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
+ from diffusers import DDIMScheduler
+ pipe.scheduler = DDIMScheduler.from_config(scheduler_config)
- else:
+ elif scheduler == "ddpm":
- nice_time_per_step = str(round(time_per_step_seconds, 2)) + " sec."
+ from diffusers import DDPMScheduler
+ pipe.scheduler = DDPMScheduler.from_config(scheduler_config)
- current_progress_text += "\n" + nice_time_per_step + " per step"
+ elif scheduler == "dpm_solver_multistep":
- return {}
+ from diffusers import DPMSolverMultistepScheduler
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(scheduler_config)
+ elif scheduler == "dpm_solver_multistep_karras_sigmas_true":
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"use_karras_sigmas": True})
+ from diffusers import DPMSolverMultistepScheduler
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config)
+ elif scheduler == "dpm_solver_multistep_algorithm_type_sde-dpmsolver_pp":
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"})
+ from diffusers import DPMSolverMultistepScheduler
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config)
-#####################
-#
-# Callback Function for Refiner Progress
-#
-# Add the current step the generation is on in the refiner to the web
-# interface.
-#
-#####################
+ elif scheduler == "dpm_solver_multistep_karras_sigmas_true_algorithm_type_sde-dpmsolver_pp":
-def callback_function_for_refiner_progress(
- callback_pipe,
- callback_step_index,
- callback_timestep,
- callback_kwargs
-):
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"use_karras_sigmas": True})
+ new_scheduler_config.update({"algorithm_type": "sde-dpmsolver++"})
- global current_progress_text
+ from diffusers import DPMSolverMultistepScheduler
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(new_scheduler_config)
- global current_refiner_generation_start_time
+ elif scheduler == "dpm_solver_singlestep":
- current_progress_text = "Refiner steps complete... " + str(callback_step_index) + " of " + str(current_actual_total_refiner_steps)
+ from diffusers import DPMSolverSinglestepScheduler
+ pipe.scheduler = DPMSolverSinglestepScheduler.from_config(scheduler_config)
- if int(callback_step_index) == 0:
+ elif scheduler == "dpm_solver_singlestep_karras_sigmas_true":
- current_refiner_generation_start_time = time.time()
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"use_karras_sigmas": True})
- if int(callback_step_index) > 0:
+ from diffusers import DPMSolverSinglestepScheduler
+ pipe.scheduler = DPMSolverSinglestepScheduler.from_config(new_scheduler_config)
- seconds_per_step = ((time.time() - current_refiner_generation_start_time) / int(callback_step_index))
+ elif scheduler == "kdpm2_discrete":
- (
- time_per_step_hours,
- time_per_step_minutes,
- time_per_step_seconds
- ) = convert_seconds(seconds_per_step)
+ from diffusers import KDPM2DiscreteScheduler
+ pipe.scheduler = KDPM2DiscreteScheduler.from_config(scheduler_config)
- if time_per_step_hours > 0:
+ elif scheduler == "kdpm2_discrete_karras_sigmas_true":
- hours_text = "hr"
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"use_karras_sigmas": True})
- if time_per_step_hours > 1:
+ from diffusers import KDPM2DiscreteScheduler
+ pipe.scheduler = KDPM2DiscreteScheduler.from_config(new_scheduler_config)
- hours_text = "hrs"
+ elif scheduler == "kdpm2_ancestral_discrete":
- nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
+ from diffusers import KDPM2AncestralDiscreteScheduler
+ pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(scheduler_config)
- elif time_per_step_minutes > 0:
+ elif scheduler == "kdpm2_ancestral_discrete_karras_sigmas_true":
- nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(generation_partial_seconds, 1)) + " sec."
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"use_karras_sigmas": True})
- else:
+ from diffusers import KDPM2AncestralDiscreteScheduler
+ pipe.scheduler = KDPM2AncestralDiscreteScheduler.from_config(new_scheduler_config)
- nice_time_per_step = str(round(time_per_step_seconds, 2)) + " sec."
+ elif scheduler == "euler_discrete":
- current_progress_text += "\n" + nice_time_per_step + " per step"
+ from diffusers import EulerDiscreteScheduler
+ pipe.scheduler = EulerDiscreteScheduler.from_config(scheduler_config)
- return {}
+ elif scheduler == "euler_ancestral_discrete":
+ from diffusers import EulerAncestralDiscreteScheduler
+ pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(scheduler_config)
+ elif scheduler == "heun_discrete":
+ from diffusers import HeunDiscreteScheduler
+ pipe.scheduler = HeunDiscreteScheduler.from_config(scheduler_config)
+ elif scheduler == "lms_discrete":
+ from diffusers import LMSDiscreteScheduler
+ pipe.scheduler = LMSDiscreteScheduler.from_config(scheduler_config)
+ elif scheduler == "lms_discrete_karras_sigmas_true":
-#####################
-#
-# Update Log Progress
-#
-# This is called every second when "show_image_creation_progress_log" is
-# set to 1. It displays the latest value in "current_progress_text".
-#
-#####################
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"use_karras_sigmas": True})
-def update_log_progress ():
+ from diffusers import LMSDiscreteScheduler
+ pipe.scheduler = LMSDiscreteScheduler.from_config(new_scheduler_config)
- global current_progress_text
+ elif scheduler == "pndm":
- log_text_field_update = gr.Textbox(
- value = current_progress_text
- )
+ from diffusers import PNDMScheduler
+ pipe.scheduler = PNDMScheduler.from_config(scheduler_config)
- return {
- log_text_field: log_text_field_update
- }
+ elif scheduler == "pndm_skip_prk_steps_true":
+ new_scheduler_config = dict(pipe.scheduler.config)
+ new_scheduler_config.update({"skip_prk_steps": True})
+ from diffusers import PNDMScheduler
+ pipe.scheduler = PNDMScheduler.from_config(new_scheduler_config)
+ elif scheduler == "deis_multistep":
+ from diffusers import DEISMultistepScheduler
+ pipe.scheduler = DEISMultistepScheduler.from_config(scheduler_config)
+ elif scheduler == "dpm_solver_sde":
-#####################
-#
-# Toggle Interactivity During Image Creation
-#
-#
-#
-#####################
+ from diffusers import DPMSolverSDEScheduler
+ pipe.scheduler = DPMSolverSDEScheduler.from_config(scheduler_config)
-def toggle_interactivity_during_image_creation (
- interactivity
-):
+ elif scheduler == "uni_pc_multistep":
- if script_being_run_on_hugging_face == 1:
+ from diffusers import UniPCMultistepScheduler
+ pipe.scheduler = UniPCMultistepScheduler.from_config(scheduler_config)
- prompt_field_update = gr.Textbox(
- interactive = interactivity,
- )
+ else:
- negative_prompt_field_update = gr.Textbox(
- interactive = interactivity
- )
+ from diffusers import PNDMScheduler
+ pipe.scheduler = PNDMScheduler.from_config(scheduler_config)
- else:
+ scheduler_used = "pndm"
- prompt_field_update = gr.Textbox()
- negative_prompt_field_update = gr.Textbox()
- refining_selection_default_config_field_update = gr.Radio(
- interactive = interactivity
+ return (
+ scheduler_used
)
- refining_denoise_start_for_default_config_field_update = gr.Slider(
- interactive = interactivity
- )
- refining_use_denoising_start_in_base_model_when_using_refiner_field_update = gr.Checkbox(
- interactive = interactivity
- )
- refining_base_model_output_to_refiner_is_in_latent_space_field_update = gr.Checkbox(
- interactive = interactivity
- )
- refining_selection_online_config_normal_field_update = gr.Radio(
- interactive = interactivity
- )
- refining_selection_online_config_automatically_selected_field_update = gr.Radio(
- interactive = interactivity
- )
+#####################
+#
+# Construct Refiner
+#
+# Prepare the refiner.
+#
+#####################
- refining_denoise_start_for_online_config_field_update = gr.Slider(
- interactive = interactivity
- )
+def construct_refiner ():
- refining_number_of_iterations_for_online_config_field_update = gr.Slider(
- interactive = interactivity
- )
+ refiner_kwargs = {
+ "use_safetensors": True
+ }
- upscaling_selection_field_update = gr.Radio(
- interactive = interactivity
- )
+ if device == "cuda":
- upscaling_num_inference_steps_field_update = gr.Slider(
- interactive = interactivity
- )
+ refiner_kwargs["variant"] = "fp16"
+ refiner_kwargs["torch_dtype"] = torch.float16
- base_model_field_update = gr.Dropdown(
- interactive = interactivity
- )
+ if use_custom_hugging_face_cache_dir == 1:
- model_configuration_field_update = gr.Dropdown(
- interactive = interactivity
- )
+ refiner_kwargs["cache_dir"] = hugging_face_cache_dir
- image_width_field_update = gr.Slider(
- interactive = interactivity
+ refiner = DiffusionPipeline.from_pretrained(
+ hugging_face_refiner_partial_path,
+ **refiner_kwargs
)
- image_height_field_update = gr.Slider(
- interactive = interactivity
- )
+ if use_model_cpu_offload_for_refiner == 1:
- base_model_num_inference_steps_field_update = gr.Slider(
- interactive = interactivity
- )
+ refiner.enable_model_cpu_offload()
- base_model_num_inference_steps_field_for_sdxl_turbo_field_update = gr.Slider(
- interactive = interactivity
- )
+ if use_xformers == 1:
- guidance_scale_field_update = gr.Slider(
- interactive = interactivity
- )
+ refiner.enable_xformers_memory_efficient_attention()
- if make_seed_selection_a_textbox == 1:
+ refiner = refiner.to(device)
- seed_field_update = gr.Textbox(
- interactive = interactivity
- )
+ if use_sequential_cpu_offload_for_refiner == 1:
- else:
+ refiner.enable_sequential_cpu_offload()
- seed_field_update = gr.Slider(
- interactive = interactivity
- )
+ if use_default_attn_processor == 1:
- return {
- prompt_field: prompt_field_update,
- negative_prompt_field: negative_prompt_field_update,
- refining_selection_default_config_field: refining_selection_default_config_field_update,
- refining_denoise_start_for_default_config_field: refining_denoise_start_for_default_config_field_update,
- refining_use_denoising_start_in_base_model_when_using_refiner_field: refining_use_denoising_start_in_base_model_when_using_refiner_field_update,
- refining_base_model_output_to_refiner_is_in_latent_space_field: refining_base_model_output_to_refiner_is_in_latent_space_field_update,
- refining_selection_online_config_normal_field: refining_selection_online_config_normal_field_update,
- refining_selection_online_config_automatically_selected_field: refining_selection_online_config_automatically_selected_field_update,
- refining_denoise_start_for_online_config_field: refining_denoise_start_for_online_config_field_update,
- refining_number_of_iterations_for_online_config_field: refining_number_of_iterations_for_online_config_field_update,
- upscaling_selection_field: upscaling_selection_field_update,
- upscaling_num_inference_steps_field: upscaling_num_inference_steps_field_update,
- base_model_field: base_model_field_update,
- model_configuration_field: model_configuration_field_update,
- image_width_field: image_width_field_update,
- image_height_field: image_height_field_update,
- base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
- base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_field_update,
- guidance_scale_field: guidance_scale_field_update,
- seed_field: seed_field_update
- }
+ refiner.unet.set_default_attn_processor()
+ if device == "cuda":
+ torch.cuda.empty_cache()
+ else:
+ refiner.unet = torch.compile(
+ refiner.unet,
+ mode = "reduce-overhead",
+ fullgraph = True
+ )
+ return (
+ refiner
+ )
@@ -1510,39 +1780,123 @@ def toggle_interactivity_during_image_creation (
#####################
#
-# Before Create Image Function
+# Construct Upscaler
#
-# This is loaded before the image creation begins.
+# Prepare the upscaler.
#
#####################
-def before_create_image_function ():
+def construct_upscaler ():
- generate_image_btn_update = gr.Button(
- value = "Generating...",
- variant = "secondary",
- interactive = False
+ upscaler_kwargs = {
+ "use_safetensors": True
+ }
+
+ if device == "cuda":
+
+ upscaler_kwargs["torch_dtype"] = torch.float16
+
+ if use_custom_hugging_face_cache_dir == 1:
+
+ upscaler_kwargs["cache_dir"] = hugging_face_cache_dir
+
+ upscaler = DiffusionPipeline.from_pretrained(
+ hugging_face_upscaler_partial_path,
+ **upscaler_kwargs
)
- output_text_field_update = gr.Textbox(
- visible = False
+ if use_model_cpu_offload_for_upscaler == 1:
+
+ upscaler.enable_model_cpu_offload()
+
+ if use_xformers == 1:
+
+ upscaler.enable_xformers_memory_efficient_attention()
+
+ upscaler = upscaler.to(device)
+
+ if use_sequential_cpu_offload_for_upscaler == 1:
+
+ upscaler.enable_sequential_cpu_offload()
+
+ if use_default_attn_processor == 1:
+
+ upscaler.unet.set_default_attn_processor()
+
+ if device == "cuda":
+ torch.cuda.empty_cache()
+ else:
+ upscaler.unet = torch.compile(
+ upscaler.unet,
+ mode = "reduce-overhead",
+ fullgraph = True
+ )
+
+ return (
+ upscaler
)
- log_text_field_update = gr.Textbox(
- value = "",
- visible = True,
- every = 1
+
+
+
+
+
+#####################
+#
+# Update Prompt Info From Gallery
+#
+# If you select an image in the image gallery, display the prompt
+# information for that image.
+#
+#####################
+
+def update_prompt_info_from_gallery (
+ gallery_data: gr.SelectData,
+ image_gallery_array_state_value
+):
+
+ gallery_data_index = gallery_data.index
+
+ output_image_gallery_field_update = gr.Gallery(
+ selected_index = gallery_data_index
)
-# toggled_interactivity_fields_kwargs = toggle_interactivity_during_image_creation(False)
+ output_text_field_update = image_gallery_array_state_value[gallery_data_index]
+
+ return {
+ output_image_gallery_field: output_image_gallery_field_update,
+ output_text_field: output_text_field_update
+ }
+
+
+
+
+
+
+
+#####################
+#
+# Before Create Image Function
+#
+# This is loaded before the image creation begins.
+#
+#####################
+
+def before_create_image_function ():
+
+ generate_image_btn_update = gr.Button(
+ value = "Generating...",
+ variant = "secondary",
+ interactive = False
+ )
- toggled_interactivity_fields_kwargs = {}
+ output_text_field_update = gr.Textbox(
+ visible = False
+ )
return {
generate_image_btn: generate_image_btn_update,
- output_text_field: output_text_field_update,
- log_text_field: log_text_field_update,
- **toggled_interactivity_fields_kwargs
+ output_text_field: output_text_field_update
}
@@ -1571,21 +1925,9 @@ def after_create_image_function ():
visible = True
)
- log_text_field_update = gr.Textbox(
- value = "",
- visible = False,
- every = None
- )
-
-# toggled_interactivity_fields_kwargs = toggle_interactivity_during_image_creation(True)
-
- toggled_interactivity_fields_kwargs = {}
-
return {
generate_image_btn: generate_image_btn_update,
- output_text_field: output_text_field_update,
- log_text_field: log_text_field_update,
- **toggled_interactivity_fields_kwargs
+ output_text_field: output_text_field_update
}
@@ -1604,16 +1946,20 @@ def after_create_image_function ():
#####################
def create_image_function (
+
base_model_field_index,
- model_configuration_field_index,
prompt_text,
negative_prompt_text,
+
+ scheduler_index,
+
image_width,
image_height,
guidance_scale,
base_model_num_inference_steps,
base_model_num_inference_steps_field_for_sdxl_turbo,
actual_seed,
+ add_seed_into_pipe,
refining_selection_default_config_field_value,
refining_selection_online_config_normal_field_value,
@@ -1627,17 +1973,40 @@ def create_image_function (
refining_number_of_iterations_for_online_config_field_value,
upscaling_selection_field_value,
- upscaling_num_inference_steps
+ upscaling_num_inference_steps,
+
+ image_gallery_array_state_value,
+ prompt_information_array_state_value,
+
+ last_model_configuration_name_selected_state_value,
+ last_refiner_name_selected_state_value,
+ last_upscaler_name_selected_state_value,
+
+ stored_pipe_state,
+ stored_refiner_state,
+ stored_upscaler_state,
+
+ *model_configuration_dropdown_fields_array,
+
+ progress = gr.Progress()
+
):
- global current_progress_text
- global current_actual_total_base_model_steps
- global current_actual_total_refiner_steps
+ position_in_array = 0
- current_progress_text = ""
- current_actual_total_base_model_steps = 0
- current_actual_total_refiner_steps = 0
- current_actual_total_upscaler_steps = 0
+ model_configuration_field_object = {}
+
+ for model_configuration_field_index in model_configuration_dropdown_fields_array:
+
+ this_base_model = base_model_array[position_in_array]
+
+ model_configuration_field_object[this_base_model] = model_configuration_field_index
+
+ position_in_array += 1
+
+
+
+ add_seed_into_pipe = numerical_bool(add_seed_into_pipe)
refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value)
refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
@@ -1654,20 +2023,27 @@ def create_image_function (
base_model_name_value = base_model_array[base_model_field_index]
+
+ model_configuration_field_index = model_configuration_field_object[base_model_name_value]
model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_name_value][model_configuration_field_index]
+ scheduler_value = schedulers_array[scheduler_index]
+
current_actual_total_base_model_steps = base_model_num_inference_steps
- current_actual_total_upscaler_steps = upscaling_num_inference_steps
- is_default_config_state = 0
+ current_actual_total_refiner_steps = 0
+
+
+
+ is_default_config_state = 1
- if model_configuration_name_value in default_model_configuration_object:
+ if model_configuration_name_value in online_configurations_object:
- is_default_config_state = 1
+ is_default_config_state = 0
@@ -1704,210 +2080,88 @@ def create_image_function (
- global last_model_configuration_name_value
-
- global pipe
- global refiner
- global upscaler
-
- global image_gallery_array
- global prompt_information_array
-
if (
- (last_model_configuration_name_value == "") or
- (model_configuration_name_value != last_model_configuration_name_value)
+ (last_model_configuration_name_selected_state_value == "") or
+ (model_configuration_name_value != last_model_configuration_name_selected_state_value)
):
- current_progress_text = "Base model is loading."
- show_message(current_progress_text)
+ if (last_model_configuration_name_selected_state_value != ""):
- if (last_model_configuration_name_value != ""):
-
-# del pipe
- if 'pipe' in globals():
+ if "pipe" in globals():
del pipe
- if 'refiner' in globals():
- del refiner
-
- if 'upscaler' in globals():
- del upscaler
-
- import gc
-
- gc.collect()
-
- if device == "cuda":
- torch.cuda.empty_cache()
-
- if base_model_name_value == "photoreal":
-
- base_model_kwargs = {
- "safety_checker": None,
- "requires_safety_checker": False
- }
-
- elif base_model_name_value == "sdxl_turbo":
-
- base_model_kwargs = {
- "use_safetensors": True,
- "safety_checker": None
- }
-
- if device == "cuda":
-
- base_model_kwargs["variant"] = "fp16"
-
- else:
-
- base_model_kwargs = {
- "use_safetensors": True
- }
-
- if device == "cuda":
-
- base_model_kwargs["variant"] = "fp16"
-
- if device == "cuda":
-
- base_model_kwargs["torch_dtype"] = torch.float16
-
- if use_custom_hugging_face_cache_dir == 1:
-
- base_model_kwargs["cache_dir"] = hugging_face_cache_dir
-
- pipe = DiffusionPipeline.from_pretrained(
- model_configuration_links_object[model_configuration_name_value],
- **base_model_kwargs
+ progress(
+ progress = 0,
+ desc = "Base model is loading"
)
- if use_model_cpu_offload_for_base_model == 1:
- pipe.enable_model_cpu_offload()
+ (
+ pipe
+ ) = construct_pipe(
+ base_model_name_value,
+ model_configuration_name_value
+ )
- if use_xformers == 1:
- pipe.enable_xformers_memory_efficient_attention()
+ last_model_configuration_name_selected_state_value = model_configuration_name_value
- pipe = pipe.to(device)
+ else:
- if use_sequential_cpu_offload_for_base_model == 1:
- pipe.enable_sequential_cpu_offload()
+ pipe = stored_pipe_state
- if use_default_attn_processor == 1:
- pipe.unet.set_default_attn_processor()
- if device == "cuda":
- torch.cuda.empty_cache()
- else:
- pipe.unet = torch.compile(
- pipe.unet,
- mode = "reduce-overhead",
- fullgraph = True
- )
- last_model_configuration_name_value = model_configuration_name_value
+ (
+ scheduler_used
+ ) = configure_scheduler(
+ pipe,
+ scheduler_value
+ )
if use_refiner == 1:
- current_progress_text = "Refiner is loading."
- show_message(current_progress_text)
-
- refiner_kwargs = {
- "use_safetensors": True
- }
-
- if device == "cuda":
-
- refiner_kwargs["variant"] = "fp16"
- refiner_kwargs["torch_dtype"] = torch.float16
-
- if use_custom_hugging_face_cache_dir == 1:
-
- refiner_kwargs["cache_dir"] = hugging_face_cache_dir
-
- refiner = DiffusionPipeline.from_pretrained(
- hugging_face_refiner_partial_path,
- **refiner_kwargs
- )
-
- if use_model_cpu_offload_for_refiner == 1:
-
- refiner.enable_model_cpu_offload()
-
- if use_xformers == 1:
-
- refiner.enable_xformers_memory_efficient_attention()
-
- refiner = refiner.to(device)
-
- if use_sequential_cpu_offload_for_refiner == 1:
-
- refiner.enable_sequential_cpu_offload()
-
- if use_default_attn_processor == 1:
-
- refiner.unet.set_default_attn_processor()
+ if (last_refiner_name_selected_state_value == ""):
- if device == "cuda":
- torch.cuda.empty_cache()
- else:
- refiner.unet = torch.compile(
- refiner.unet,
- mode = "reduce-overhead",
- fullgraph = True
+ progress(
+ progress = 0,
+ desc = "Refiner is loading"
)
+ refiner = construct_refiner()
+ last_refiner_name_selected_state_value = "refiner"
- if use_upscaler == 1:
-
- current_progress_text = "Upscaler is loading."
- show_message(current_progress_text)
-
- upscaler_kwargs = {
- "use_safetensors": True
- }
-
- if device == "cuda":
+ else:
- upscaler_kwargs["torch_dtype"] = torch.float16
+ refiner = stored_refiner_state
- if use_custom_hugging_face_cache_dir == 1:
+ else:
- upscaler_kwargs["cache_dir"] = hugging_face_cache_dir
+ refiner = {}
- upscaler = DiffusionPipeline.from_pretrained(
- hugging_face_upscaler_partial_path,
- **upscaler_kwargs
- )
- if use_model_cpu_offload_for_upscaler == 1:
- upscaler.enable_model_cpu_offload()
+ if use_upscaler == 1:
- if use_xformers == 1:
+ if (last_upscaler_name_selected_state_value == ""):
- upscaler.enable_xformers_memory_efficient_attention()
+ progress(
+ progress = 0,
+ desc = "Upscaler is loading"
+ )
- upscaler = upscaler.to(device)
+ upscaler = construct_upscaler()
- if use_sequential_cpu_offload_for_upscaler == 1:
+ last_upscaler_name_selected_state_value = "upscaler"
- upscaler.enable_sequential_cpu_offload()
+ else:
- if use_default_attn_processor == 1:
+ upscaler = stored_upscaler_state
- upscaler.unet.set_default_attn_processor()
+ else:
- if device == "cuda":
- torch.cuda.empty_cache()
- else:
- upscaler.unet = torch.compile(
- upscaler.unet,
- mode = "reduce-overhead",
- fullgraph = True
- )
+ upscaler = ""
@@ -1938,6 +2192,7 @@ def create_image_function (
prompt_text,
prompt_text_not_used_substring
) = truncate_prompt(
+ pipe,
prompt_text
)
@@ -1951,6 +2206,7 @@ def create_image_function (
negative_prompt_text,
negative_prompt_text_not_used_substring
) = truncate_prompt(
+ pipe,
negative_prompt_text
)
@@ -1960,14 +2216,14 @@ def create_image_function (
message_about_prompt_truncation += "Your negative prompt has been truncated because it is too long. This part has been truncated:
" + negative_prompt_text_not_used_substring + ""
- prompt_truncated_field_udpate = gr.HTML(
+ prompt_truncated_field_update = gr.HTML(
value = "",
visible = False
)
if len(message_about_prompt_truncation) > 0:
- prompt_truncated_field_udpate = gr.HTML(
+ prompt_truncated_field_update = gr.HTML(
value = "Note: " + message_about_prompt_truncation + "
",
visible = True
)
@@ -1980,8 +2236,6 @@ def create_image_function (
if actual_seed == 0:
- import random
-
default_seed_maximum_for_random = default_seed_maximum
if default_seed_maximum_for_random > 9007199254740992:
@@ -2002,9 +2256,136 @@ def create_image_function (
if show_image_creation_progress_log == 1:
+
+
+ current_base_model_generation_start_time = 0
+
+ def callback_function_for_base_model_progress(
+ callback_pipe,
+ callback_step_index,
+ callback_timestep,
+ callback_kwargs
+ ):
+
+ global current_base_model_generation_start_time
+
+ if int(callback_step_index) == 0:
+
+ current_base_model_generation_start_time = time.time()
+
+ if int(callback_step_index) > 0:
+
+ seconds_per_step = ((time.time() - current_base_model_generation_start_time) / int(callback_step_index))
+
+ (
+ time_per_step_hours,
+ time_per_step_minutes,
+ time_per_step_seconds
+ ) = convert_seconds(seconds_per_step)
+
+ if time_per_step_hours > 0:
+
+ hours_text = "hr"
+
+ if time_per_step_hours > 1:
+
+ hours_text = "hrs"
+
+ nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
+
+ elif time_per_step_minutes > 0:
+
+ nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
+
+ else:
+
+ nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds"
+
+ base_model_progress_text = nice_time_per_step + " per step"
+
+ else:
+
+ base_model_progress_text = "Base model processing started"
+
+ progress(
+ progress = (
+ callback_step_index,
+ current_actual_total_base_model_steps
+ ),
+ desc = base_model_progress_text,
+ unit = "base model steps"
+ )
+
+ return {}
+
callback_to_do_for_base_model_progress = callback_function_for_base_model_progress
+
+
+
+ current_refiner_generation_start_time = 0
+
+ def callback_function_for_refiner_progress(
+ callback_pipe,
+ callback_step_index,
+ callback_timestep,
+ callback_kwargs
+ ):
+
+ global current_refiner_generation_start_time
+
+ if int(callback_step_index) == 0:
+
+ current_refiner_generation_start_time = time.time()
+
+ if int(callback_step_index) > 0:
+
+ seconds_per_step = ((time.time() - current_refiner_generation_start_time) / int(callback_step_index))
+
+ (
+ time_per_step_hours,
+ time_per_step_minutes,
+ time_per_step_seconds
+ ) = convert_seconds(seconds_per_step)
+
+ if time_per_step_hours > 0:
+
+ hours_text = "hr"
+
+ if time_per_step_hours > 1:
+
+ hours_text = "hrs"
+
+ nice_time_per_step = str(int(time_per_step_hours)) + " " + hours_text + ". " + str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
+
+ elif time_per_step_minutes > 0:
+
+ nice_time_per_step = str(int(time_per_step_minutes)) + " min. " + str(round(time_per_step_seconds, 1)) + " sec."
+
+ else:
+
+ nice_time_per_step = str(round(time_per_step_seconds, 2)) + " seconds"
+
+ refiner_progress_text = nice_time_per_step + " per step"
+
+ else:
+
+ refiner_progress_text = "Refner processing started"
+
+ progress(
+ progress = (
+ callback_step_index,
+ current_actual_total_refiner_steps
+ ),
+ desc = refiner_progress_text,
+ unit = "refiner steps"
+ )
+
+ return {}
+
callback_to_do_for_refiner_progress = callback_function_for_refiner_progress
+
+
else:
callback_to_do_for_base_model_progress = None
@@ -2012,7 +2393,31 @@ def create_image_function (
- if model_configuration_name_value.find("default") < 0:
+ is_sdxl_online_config = 0
+ is_photoreal_online_config = 0
+
+ if (
+ model_configuration_name_value == "sdxl_2023-11-12" or
+ model_configuration_name_value == "sdxl_2023-09-05"
+ ):
+
+ is_sdxl_online_config = 1
+
+ elif (
+ model_configuration_name_value == "photoreal_2023-11-12" or
+ model_configuration_name_value == "photoreal_2023-09-01"
+ ):
+
+ is_photoreal_online_config = 1
+
+
+
+
+
+ if (
+ (is_sdxl_online_config == 1) or
+ (is_photoreal_online_config == 1)
+ ):
@@ -2044,10 +2449,9 @@ def create_image_function (
high_noise_frac = refining_denoise_start_for_online_config_field_value
- if (
- model_configuration_name_value == "sdxl_2023-11-12" or
- model_configuration_name_value == "sdxl_2023-09-05"
- ):
+ if (is_sdxl_online_config) == 1:
+
+ add_seed_into_pipe = 1
n_steps = refining_number_of_iterations_for_online_config_field_value
@@ -2057,15 +2461,16 @@ def create_image_function (
upscaling_num_inference_steps = 5
- current_actual_total_upscaler_steps = upscaling_num_inference_steps
-
-
-
if show_messages_in_command_prompt == 1:
+
print ("Initial image creation has begun.");
if show_image_creation_progress_log == 1:
- current_progress_text = "Initial image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Initial image creation has begun"
+ )
int_image = pipe(
prompt,
@@ -2083,10 +2488,15 @@ def create_image_function (
).images
if show_messages_in_command_prompt == 1:
+
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Refining is beginning."
+
+ progress(
+ progress = 0,
+ desc = "Refining is beginning"
+ )
current_actual_total_refiner_steps = int(int(n_steps) * float(high_noise_frac))
@@ -2113,10 +2523,15 @@ def create_image_function (
if upscaling == 'Yes':
if show_messages_in_command_prompt == 1:
+
print ("Upscaler steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Upscaling in progress.\n(step by step progress not displayed)"
+
+ progress(
+ progress = 0,
+ desc = "Upscaling in progress (no steps shown)"
+ )
# Changed
#
@@ -2145,10 +2560,9 @@ def create_image_function (
- elif (
- model_configuration_name_value == "photoreal_2023-11-12" or
- model_configuration_name_value == "photoreal_2023-09-01"
- ):
+ elif (is_photoreal_online_config == 1):
+
+ add_seed_into_pipe = 0
Prompt = prompt
upscale = refining # Not a mistake. This is wrong in code.
@@ -2158,10 +2572,15 @@ def create_image_function (
if upscale == "Yes":
if show_messages_in_command_prompt == 1:
+
print ("Initial image creation has begun.");
if show_image_creation_progress_log == 1:
- current_progress_text = "Initial image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Initial image creation has begun"
+ )
int_image = pipe(
Prompt,
@@ -2174,10 +2593,15 @@ def create_image_function (
).images
if show_messages_in_command_prompt == 1:
+
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Refining is beginning."
+
+ progress(
+ progress = 0,
+ desc = "Refining is beginning"
+ )
default_steps_in_diffusers = 50
@@ -2202,10 +2626,15 @@ def create_image_function (
else:
if show_messages_in_command_prompt == 1:
+
print ("Image creation has begun.");
if show_image_creation_progress_log == 1:
- current_progress_text = "Image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Image creation has begun"
+ )
image = pipe(
Prompt,
@@ -2225,6 +2654,12 @@ def create_image_function (
+ if add_seed_into_pipe == 0:
+
+ generator = None
+
+
+
#
#
#
@@ -2293,11 +2728,16 @@ def create_image_function (
if use_upscaler == 1:
if show_messages_in_command_prompt == 1:
+
print ("Will create initial image, then refine and then upscale.");
print ("Initial image steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Initial image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Initial image creation has begun"
+ )
intitial_image = pipe(
prompt = prompt_text,
@@ -2314,10 +2754,15 @@ def create_image_function (
).images
if show_messages_in_command_prompt == 1:
+
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Refining is beginning."
+
+ progress(
+ progress = 0,
+ desc = "Refining is beginning"
+ )
refined_image = refiner(
prompt = prompt_text,
@@ -2330,10 +2775,15 @@ def create_image_function (
).images
if show_messages_in_command_prompt == 1:
+
print ("Upscaler steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Upscaling in progress.\n(step by step progress not displayed)"
+
+ progress(
+ progress = 0,
+ desc = "Upscaling in progress (no steps shown)"
+ )
upscaled_image = upscaler(
prompt = prompt_text,
@@ -2351,11 +2801,16 @@ def create_image_function (
else:
if show_messages_in_command_prompt == 1:
+
print ("Will create initial image and then refine.");
print ("Initial image steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Initial image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Initial image creation has begun"
+ )
intitial_image = pipe(
prompt = prompt_text,
@@ -2372,10 +2827,15 @@ def create_image_function (
).images
if show_messages_in_command_prompt == 1:
+
print ("Refiner steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Refining is beginning."
+
+ progress(
+ progress = 0,
+ desc = "Refining is beginning"
+ )
refined_image = refiner(
prompt = prompt_text,
@@ -2396,11 +2856,16 @@ def create_image_function (
if use_upscaler == 1:
if show_messages_in_command_prompt == 1:
+
print ("Will create initial image and then upscale.");
print ("Initial image steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Initial image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Initial image creation has begun"
+ )
intitial_image = pipe(
prompt = prompt_text,
@@ -2416,10 +2881,15 @@ def create_image_function (
).images
if show_messages_in_command_prompt == 1:
+
print ("Upscaler steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Upscaling in progress.\n(step by step progress not displayed)"
+
+ progress(
+ progress = 0,
+ desc = "Upscaling in progress (no steps shown)"
+ )
upscaled_image = upscaler(
prompt = prompt_text,
@@ -2437,11 +2907,16 @@ def create_image_function (
else:
if show_messages_in_command_prompt == 1:
+
print ("Will create image (no refining or upscaling).");
print ("Image steps...");
if show_image_creation_progress_log == 1:
- current_progress_text = "Image creation has begun."
+
+ progress(
+ progress = 0,
+ desc = "Image creation has begun"
+ )
image = pipe(
prompt = prompt_text,
@@ -2505,19 +2980,34 @@ def create_image_function (
"Seed: " + str(actual_seed)
])
+ nice_seed_added_to_generation = "No"
+
+ if add_seed_into_pipe == 1:
+
+ nice_seed_added_to_generation = "Yes"
+
+ info_about_prompt_lines_array.extend([
+ "Seed added to generation? " + nice_seed_added_to_generation
+ ])
+
if int(guidance_scale) > 0:
info_about_prompt_lines_array.extend([
"Guidance Scale: " + str(guidance_scale)
])
+ nice_scheduler_name = scheduler_short_names_object[scheduler_used]
+
+ if scheduler_value == "model_default":
+
+ nice_scheduler_name += " (model default)"
+
info_about_prompt_lines_array.extend([
"Steps: " + str(base_model_num_inference_steps),
- "Model: " + nice_model_name
+ "Model: " + nice_model_name,
+ "Scheduler/Sampler: " + nice_scheduler_name
])
-
-
if use_refiner == 1:
# Default Configuration
@@ -2608,12 +3098,12 @@ def create_image_function (
nice_default_attn_processor_usage = "Yes"
info_about_prompt_lines_array.extend([
- "Default AttnProcessor Used?: " + nice_default_attn_processor_usage
+ "Default AttnProcessor Used? " + nice_default_attn_processor_usage
])
- info_about_prompt = '\n'.join(info_about_prompt_lines_array)
+ output_text_field_update = '\n'.join(info_about_prompt_lines_array)
@@ -2650,26 +3140,26 @@ def create_image_function (
saved_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt"
prompt_info_file_handle = open(saved_text_file_path_and_file, "w")
- prompt_info_file_handle.writelines(info_about_prompt)
+ prompt_info_file_handle.writelines(output_text_field_update)
prompt_info_file_handle.close()
- if use_image_gallery == 1:
+ output_image_field_update = gr.Image(
+ value = image_to_return
+ )
- image_gallery_array.insert(0, image_to_return)
- prompt_information_array.insert(0, info_about_prompt)
+ image_gallery_array_state_value.insert(0, image_to_return)
+ prompt_information_array_state_value.insert(0, output_text_field_update)
- output_image_field_update = gr.Gallery(
- value = image_gallery_array,
- selected_index = 0
- )
+ output_image_gallery_field_update = gr.Gallery(
+ value = image_gallery_array_state_value,
+ selected_index = 0
+ )
- else:
+ image_gallery_array_state_update = image_gallery_array_state_value
- output_image_field_update = gr.Image(
- value = image_to_return
- )
+ prompt_information_array_state_update = prompt_information_array_state_value
@@ -2679,11 +3169,24 @@ def create_image_function (
- return {
- output_image_field: output_image_field_update,
- output_text_field: info_about_prompt,
- prompt_truncated_field: prompt_truncated_field_udpate
- }
+ last_model_configuration_name_selected_state_update = last_model_configuration_name_selected_state_value
+ last_refiner_name_selected_state_update = last_refiner_name_selected_state_value
+ last_upscaler_name_selected_state_update = last_upscaler_name_selected_state_value
+
+
+
+ return (
+ output_image_field_update,
+ output_image_gallery_field_update,
+ output_text_field_update,
+ prompt_truncated_field_update,
+ last_model_configuration_name_selected_state_update,
+ last_refiner_name_selected_state_update,
+ last_upscaler_name_selected_state_update,
+ pipe,
+ refiner,
+ upscaler
+ )
@@ -2724,342 +3227,6 @@ def cancel_image_processing():
-#####################
-#
-# Base Model Field Update Function
-#
-# When the base model dropdown changes, this function is run.
-#
-#####################
-
-def base_model_field_update_function(
- base_model_field_index
-):
-
- base_model_field_value = base_model_array[base_model_field_index]
-
- if base_model_field_value in base_model_array:
-
- if base_model_field_value in base_model_object_of_model_configuration_arrays:
-
- model_configuration_choices_array_update = []
-
- for this_model_configuration in base_model_object_of_model_configuration_arrays[base_model_field_value]:
-
- model_configuration_choices_array_update.append(
- model_configuration_names_object[this_model_configuration]
- )
-
- if base_model_field_value in base_model_model_configuration_defaults_object:
-
- model_configuration_field_selected_value = stored_model_configuration_names_object[base_model_field_value]
-
- model_configuration_field_update = gr.Dropdown(
- choices = model_configuration_choices_array_update,
- value = model_configuration_field_selected_value
- )
-
- negative_prompt_field_visibility = True
- negative_prompt_for_sdxl_turbo_field_visibility = False
- base_model_num_inference_steps_field_visibility = True
- base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False
- guidance_scale_field_visibility = True
- guidance_scale_for_sdxl_turbo_field_visibility = False
-
- if base_model_field_value == "sdxl_turbo":
-
- negative_prompt_field_visibility = False
- negative_prompt_for_sdxl_turbo_field_visibility = True
- base_model_num_inference_steps_field_visibility = False
- base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True
- guidance_scale_field_visibility = False
- guidance_scale_for_sdxl_turbo_field_visibility = True
-
- negative_prompt_field_update = gr.Textbox(
- visible = negative_prompt_field_visibility
- )
-
- negative_prompt_for_sdxl_turbo_field_update = gr.HTML(
- visible = negative_prompt_for_sdxl_turbo_field_visibility
- )
-
- base_model_num_inference_steps_field_update = gr.Slider(
- visible = base_model_num_inference_steps_field_visibility
- )
-
- base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider(
- visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility
- )
-
- guidance_scale_field_update = gr.Slider(
- visible = guidance_scale_field_visibility
- )
-
- guidance_scale_for_sdxl_turbo_field_update = gr.HTML(
- visible = guidance_scale_for_sdxl_turbo_field_visibility
- )
-
- return {
- model_configuration_field: model_configuration_field_update,
- negative_prompt_field: negative_prompt_field_update,
- negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update,
- base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
- base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update,
- guidance_scale_field: guidance_scale_field_update,
- guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update
-
- }
-
- error_function("Error")
-
-
-
-
-
-
-
-#####################
-#
-# Model Configuration Field Update Function
-#
-# When the model configuration dropdown changes, this function is run.
-#
-#####################
-
-def model_configuration_field_update_function(
- base_model_field_index,
- model_configuration_field_index
-):
-
- base_model_field_value = base_model_array[base_model_field_index]
-
- if base_model_field_value in base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]:
-
- model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]
-
-
-
- stored_model_configuration_names_object[base_model_field_value] = model_configuration_names_object[model_configuration_name_value]
-
-
-
- is_default_config_state = 0
-
- if model_configuration_name_value in default_model_configuration_object:
-
- is_default_config_state = 1
-
- negative_prompt_field_visibility = True
- negative_prompt_for_sdxl_turbo_field_visibility = False
- base_model_num_inference_steps_field_visibility = True
- base_model_num_inference_steps_field_for_sdxl_turbo_visibility = False
- guidance_scale_field_visibility = True
- guidance_scale_for_sdxl_turbo_field_visibility = False
-
- if base_model_field_value == "sdxl_turbo":
-
- negative_prompt_field_visibility = False
- negative_prompt_for_sdxl_turbo_field_visibility = True
- base_model_num_inference_steps_field_visibility = False
- base_model_num_inference_steps_field_for_sdxl_turbo_visibility = True
- guidance_scale_field_visibility = False
- guidance_scale_for_sdxl_turbo_field_visibility = True
-
- negative_prompt_field_update = gr.Textbox(
- visible = negative_prompt_field_visibility
- )
-
- negative_prompt_for_sdxl_turbo_field_update = gr.HTML(
- visible = negative_prompt_for_sdxl_turbo_field_visibility
- )
-
- base_model_num_inference_steps_field_update = gr.Slider(
- visible = base_model_num_inference_steps_field_visibility
- )
-
- base_model_num_inference_steps_field_for_sdxl_turbo_update = gr.Slider(
- visible = base_model_num_inference_steps_field_for_sdxl_turbo_visibility
- )
-
- guidance_scale_field_update = gr.Slider(
- visible = guidance_scale_field_visibility
- )
-
- guidance_scale_for_sdxl_turbo_field_update = gr.HTML(
- visible = guidance_scale_for_sdxl_turbo_field_visibility
- )
-
-
-
- refiner_default_config_accordion_visibility = False
- refiner_online_config_accordion_visibility = True
-
- if is_default_config_state == 1:
-
- refiner_default_config_accordion_visibility = True
- refiner_online_config_accordion_visibility = False
-
-
-
- refining_selection_automatically_selected_message_field_visibility = False
-
- refining_selection_online_config_normal_field_visibility = True
- refining_selection_online_config_automatically_selected_field_visibility = False
-
- if model_configuration_name_value in model_configuration_force_refiner_object:
-
- refining_selection_automatically_selected_message_field_visibility = True
-
- refining_selection_online_config_normal_field_visibility = False
- refining_selection_online_config_automatically_selected_field_visibility = True
-
- refining_number_of_iterations_for_online_config_field_visibility = False
-
- if model_configuration_name_value in model_configuration_include_refiner_number_of_steps_object:
-
- refining_number_of_iterations_for_online_config_field_visibility = True
-
-
-
- refiner_default_config_accordion_update = gr.Accordion(
- visible = refiner_default_config_accordion_visibility
- )
-
- refiner_online_config_accordion_update = gr.Accordion(
- visible = refiner_online_config_accordion_visibility
- )
-
- refining_selection_automatically_selected_message_field_update = gr.Markdown(
- visible = refining_selection_automatically_selected_message_field_visibility
- )
-
- refining_selection_online_config_normal_field_update = gr.Radio(
- visible = refining_selection_online_config_normal_field_visibility
- )
-
- refining_selection_online_config_automatically_selected_field_update = gr.Radio(
- visible = refining_selection_online_config_automatically_selected_field_visibility
- )
-
- refining_number_of_iterations_for_online_config_field_update = gr.Radio(
- visible = refining_number_of_iterations_for_online_config_field_visibility
- )
-
-
-
- return {
- negative_prompt_field: negative_prompt_field_update,
- negative_prompt_for_sdxl_turbo_field: negative_prompt_for_sdxl_turbo_field_update,
- base_model_num_inference_steps_field: base_model_num_inference_steps_field_update,
- base_model_num_inference_steps_field_for_sdxl_turbo_field: base_model_num_inference_steps_field_for_sdxl_turbo_update,
- guidance_scale_field: guidance_scale_field_update,
- guidance_scale_for_sdxl_turbo_field: guidance_scale_for_sdxl_turbo_field_update,
-
- refiner_default_config_accordion: refiner_default_config_accordion_update,
- refiner_online_config_accordion: refiner_online_config_accordion_update,
- refining_selection_automatically_selected_message_field: refining_selection_automatically_selected_message_field_update,
- refining_selection_online_config_normal_field: refining_selection_online_config_normal_field_update,
- refining_selection_online_config_automatically_selected_field: refining_selection_online_config_automatically_selected_field_update,
-
- refining_number_of_iterations_for_online_config_field: refining_number_of_iterations_for_online_config_field_update
-
- }
-
- error_function("Error")
-
-
-
-
-
-
-
-#####################
-#
-# Update Refiner and Upscaler Status Function
-#
-# When the refiner or upscaler is turned on or off, a text message is
-# printed on the page. That needs to be updated.
-#
-#####################
-
-def update_refiner_and_upscaler_status_function(
- base_model_field_index,
- model_configuration_field_index,
- refining_selection_default_config_field_value,
- refining_selection_online_config_normal_field_value,
- refining_selection_online_config_automatically_selected_field_value,
- upscaling_selection_field_value
-):
-
- base_model_field_value = base_model_array[base_model_field_index]
-
- if base_model_field_value in base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]:
-
- model_configuration_name_value = base_model_object_of_model_configuration_arrays[base_model_field_value][model_configuration_field_index]
-
- is_default_config_state = 0
-
- if model_configuration_name_value in default_model_configuration_object:
-
- is_default_config_state = 1
-
- refining_selection_default_config_field_value = numerical_bool(refining_selection_default_config_field_value)
- refining_selection_online_config_normal_field_value = numerical_bool(refining_selection_online_config_normal_field_value)
- refining_selection_online_config_automatically_selected_field_value = numerical_bool(refining_selection_online_config_automatically_selected_field_value)
- upscaling_selection_field_value = numerical_bool(upscaling_selection_field_value)
-
- refiner_and_upscaler_status_text = refiner_and_upscaler_status_opening_html
-
- if (
- (
- (is_default_config_state == 1) and
- refining_selection_default_config_field_value
- ) or (
- (is_default_config_state != 1) and
- (
- (
- (model_configuration_name_value not in model_configuration_force_refiner_object) and
- refining_selection_online_config_normal_field_value
- ) or (
- (model_configuration_name_value in model_configuration_force_refiner_object) and
- refining_selection_online_config_automatically_selected_field_value
- )
- )
- )
- ):
-
- refiner_and_upscaler_status_text += refiner_on_text
-
- else:
-
- refiner_and_upscaler_status_text += refiner_off_text
-
- if upscaling_selection_field_value == 1:
-
- refiner_and_upscaler_status_text += upscaler_on_text
-
- else:
-
- refiner_and_upscaler_status_text += upscaler_off_text
-
- refiner_and_upscaler_status_text += refiner_and_upscaler_status_closing_html
-
- refiner_and_upscaler_text_field_update = gr.HTML(
- value = refiner_and_upscaler_status_text
- )
-
- return {
- refiner_and_upscaler_text_field: refiner_and_upscaler_text_field_update
- }
-
- error_function("Error")
-
-
-
-
-
-
-
###############################################################################
###############################################################################
#
@@ -3083,16 +3250,9 @@ def update_refiner_and_upscaler_status_function(
# Hide border when yield is used:
# https://github.com/gradio-app/gradio/issues/5479
# .generating {border: none !important;}
-#
-# Remove orange border for generation progress.
-# #generation_progress_id div {border: none;}
css_to_use = "footer{display:none !important}"
-if show_image_creation_progress_log == 1:
-
- css_to_use += "#generation_progress_id div {border: none;}"
-
with gr.Blocks(
title = "AI Image Creation",
css = css_to_use,
@@ -3104,6 +3264,20 @@ with gr.Blocks(
analytics_enabled = False
) as sd_interface:
+ # Variables to store for user session
+
+ image_gallery_array_state = gr.State([])
+
+ prompt_information_array_state = gr.State([])
+
+ last_model_configuration_name_selected_state = gr.State("")
+ last_refiner_name_selected_state = gr.State("")
+ last_upscaler_name_selected_state = gr.State("")
+
+ stored_pipe_state = gr.State({})
+ stored_refiner_state = gr.State({})
+ stored_upscaler_state = gr.State({})
+
gr.Markdown(opening_html)
with gr.Row():
@@ -3124,19 +3298,23 @@ with gr.Blocks(
value = default_prompt
)
- with gr.Row():
+ with gr.Row(
+ elem_id = "negative_prompt_field_row_id",
+ visible = default_negative_prompt_field_row_visibility
+ ):
negative_prompt_field = gr.Textbox(
label = "Negative Prompt (77 token limit):",
- value = default_negative_prompt,
- visible = default_negative_prompt_field_visibility
+ value = default_negative_prompt
)
- with gr.Row():
+ with gr.Row(
+ elem_id = "negative_prompt_for_sdxl_turbo_field_row_id",
+ visible = default_negative_prompt_for_sdxl_turbo_field_row_visibility
+ ):
negative_prompt_for_sdxl_turbo_field = gr.HTML(
- value = "Negative prompt is not used for SDXL Turbo.
",
- visible = default_negative_prompt_for_sdxl_turbo_field_visibility
+ value = "Negative prompt is not used for SDXL Turbo.
"
)
with gr.Group(
@@ -3144,8 +3322,8 @@ with gr.Blocks(
):
with gr.Accordion(
- label = "Refiner (Default configuration)",
elem_id = "refiner_default_config_accordion_id",
+ label = "Refiner (Default configuration)",
open = refiner_default_config_accordion_open,
visible = refiner_default_config_accordion_visible
) as refiner_default_config_accordion:
@@ -3200,8 +3378,8 @@ with gr.Blocks(
)
with gr.Accordion(
- label = "Refiner (Online configuration)",
elem_id = "refiner_online_config_accordion_id",
+ label = "Refiner (Online configuration)",
open = refiner_online_config_accordion_open,
visible = refiner_online_config_accordion_visible
) as refiner_online_config_accordion:
@@ -3229,6 +3407,7 @@ with gr.Blocks(
with gr.Row():
refining_selection_automatically_selected_message_field = gr.Markdown(
+ elem_id = "refining_selection_automatically_selected_message_field_id",
value = "The online configuration you selected automatically uses the refiner.",
visible = refining_selection_automatically_selected_message_field_visible
)
@@ -3236,6 +3415,7 @@ with gr.Blocks(
with gr.Row():
refining_selection_online_config_normal_field = gr.Radio(
+ elem_id = "refining_selection_online_config_normal_field_id",
choices = ["Yes", "No"],
value = default_refine_option,
show_label = False,
@@ -3246,6 +3426,7 @@ with gr.Blocks(
with gr.Row():
refining_selection_online_config_automatically_selected_field = gr.Radio(
+ elem_id = "refining_selection_online_config_automatically_selected_field_id",
choices = ["Yes"],
value = "Yes",
show_label = False,
@@ -3263,21 +3444,23 @@ with gr.Blocks(
step = 0.01
)
- with gr.Row():
+ refining_number_of_iterations_for_online_config_field_visible = False
- refining_number_of_iterations_for_online_config_field_visible = False
+ if default_model_configuration in model_configuration_include_refiner_number_of_steps_object:
- if default_model_configuration in model_configuration_include_refiner_number_of_steps_object:
+ refining_number_of_iterations_for_online_config_field_visible = True
- refining_number_of_iterations_for_online_config_field_visible = True
+ with gr.Row(
+ elem_id = "refining_number_of_iterations_for_online_config_field_row_id",
+ visible = refining_number_of_iterations_for_online_config_field_visible
+ ):
refining_number_of_iterations_for_online_config_field = gr.Slider(
label = "Refiner number of iterations",
minimum = 1,
maximum = 100,
value = 100,
- step = 1,
- visible = refining_number_of_iterations_for_online_config_field_visible
+ step = 1
)
with gr.Group(
@@ -3315,10 +3498,10 @@ with gr.Blocks(
with gr.Row():
upscaling_num_inference_steps_field = gr.Slider(
- label = "Upscaler number of iterations",
+ label = "Upscaler number of steps",
minimum = 1,
- maximum = 100,
- value = 100,
+ maximum = maximum_upscaler_steps,
+ value = default_upscaler_steps,
step = 1
)
@@ -3328,7 +3511,7 @@ with gr.Blocks(
):
refiner_and_upscaler_text_field = gr.HTML(
- value = default_refiner_and_upscaler_status_text
+ value = "" + default_refiner_and_upscaler_status_text + "
"
)
with gr.Column(scale = 1):
@@ -3342,20 +3525,68 @@ with gr.Blocks(
choices = default_base_model_choices_array,
value = default_base_model_nicely_named_value,
type = "index",
- #info = "Main model type",
filterable = False,
- min_width = 240,
+ #min_width = 240,
interactive = True
)
- model_configuration_field = gr.Dropdown(
- label = "Configuration Type:",
- choices = default_model_configuration_choices_array,
- value = default_model_configuration_nicely_named_value,
+ model_configuration_dropdown_field_values_for_js = ""
+
+ model_configuration_dropdown_fields_array = []
+
+ for this_base_model in base_model_array:
+
+ this_model_configuration_choices_array = []
+
+ for this_model_configuration in base_model_object_of_model_configuration_arrays[this_base_model]:
+
+ this_model_configuration_choices_array.append(
+ model_configuration_names_object[this_model_configuration]
+ )
+
+ this_configuration_field_row_visibility = False
+
+ if (
+ (this_base_model == default_base_model) and
+ (allow_online_configurations == 1)
+ ):
+
+ this_configuration_field_row_visibility = True
+
+ this_configuration_field_default_value = model_configuration_names_object[base_model_model_configuration_defaults_object[this_base_model]]
+
+ this_configuration_field_default_value_for_js = this_configuration_field_default_value
+ this_configuration_field_default_value_for_js = this_configuration_field_default_value_for_js.replace("\"", "\\\"")
+
+ model_configuration_dropdown_field_values_for_js += "\"" + this_base_model + "\": \"" + this_configuration_field_default_value_for_js + "\","
+
+ with gr.Row(
+ elem_id = "model_configuration_field_" + this_base_model + "_row_id",
+ visible = this_configuration_field_row_visibility
+ ):
+
+ this_configuration_field = gr.Dropdown(
+ label = "Configuration Type:",
+ choices = this_model_configuration_choices_array,
+ value = this_configuration_field_default_value,
+ type = "index",
+ filterable = False,
+ #min_width = 240,
+ interactive = True
+ )
+
+ model_configuration_dropdown_fields_array.append(this_configuration_field)
+
+ with gr.Row():
+
+ scheduler_field = gr.Dropdown(
+ elem_id = "scheduler_field_id",
+ label = "Scheduler / Sampler:",
+ choices = default_scheduler_choices_array,
+ value = default_scheduler_nicely_named_value,
type = "index",
- #info = "See end of page for info.",
filterable = False,
- min_width = 240,
+ #min_width = 240,
interactive = True
)
@@ -3379,7 +3610,10 @@ with gr.Blocks(
interactive = True
)
- with gr.Row():
+ with gr.Row(
+ elem_id = "base_model_num_inference_steps_field_row_id",
+ visible = default_base_model_num_inference_steps_field_row_visibility
+ ):
base_model_num_inference_steps_field = gr.Slider(
label = "Steps:",
@@ -3387,11 +3621,13 @@ with gr.Blocks(
maximum = 100,
value = default_base_model_base_model_num_inference_steps,
step = 1,
- visible = default_base_model_num_inference_steps_field_visibility,
interactive = True
)
- with gr.Row():
+ with gr.Row(
+ elem_id = "base_model_num_inference_steps_field_for_sdxl_turbo_field_row_id",
+ visible = default_base_model_num_inference_steps_field_for_sdxl_turbo_field_row_visibility
+ ):
base_model_num_inference_steps_field_for_sdxl_turbo_field = gr.Slider(
label = "Steps:",
@@ -3400,11 +3636,13 @@ with gr.Blocks(
maximum = 25,
value = default_base_model_base_model_num_inference_steps_for_sdxl_turbo,
step = 1,
- visible = default_base_model_num_inference_steps_field_for_sdxl_turbo_field_visibility,
interactive = True
)
- with gr.Row():
+ with gr.Row(
+ elem_id = "guidance_scale_field_row_id",
+ visible = default_guidance_scale_field_row_visibility
+ ):
guidance_scale_field = gr.Slider(
label = "Guidance Scale:",
@@ -3412,25 +3650,44 @@ with gr.Blocks(
maximum = 15,
value = default_guidance_scale_value,
step = 0.25,
- visible = default_guidance_scale_field_visibility,
interactive = True
)
- with gr.Row():
+ with gr.Row(
+ elem_id = "guidance_scale_for_sdxl_turbo_field_row_id",
+ visible = default_guidance_scale_for_sdxl_turbo_field_row_visibility
+ ):
guidance_scale_for_sdxl_turbo_field = gr.HTML(
- value = "Guidance scale is not used for SDXL Turbo.
",
- visible = default_guidance_scale_for_sdxl_turbo_field_visibility
+ value = "Guidance scale is not used for SDXL Turbo.
"
)
with gr.Row():
+ # If you use a slider or number field for the seed, some
+ # seeds can't be duplicated using those fields. If you
+ # enter a number greater than 9007199254740992, the seed
+ # won't reliably be used. This is a technical limitation
+ # as of writing this. See the bug report here:
+ # https://github.com/gradio-app/gradio/issues/5354
+ #
+ # Until this is fixed, we use a textbox if the max seed
+ # allowed is greater than that number. Using the slider,
+ # and not entering a number, might be the way to get
+ # reliable numbers above that number, if you just don't
+ # then use the up and down arrows in the field to go up
+ # or down a number.
+ #
+ # For now, I do this, but I might eventually have a
+ # setting on the page to allow the slider.
+
if make_seed_selection_a_textbox == 1:
seed_field = gr.Textbox(
- label = "Seed (0 is random; " + str(default_seed_maximum) + " max):",
- value = "0",
- interactive = True
+ label = "Seed (0 is random):",
+ value = default_seed_value,
+ interactive = True,
+ info = "Maximum: " + str(default_seed_maximum)
)
else:
@@ -3444,38 +3701,56 @@ with gr.Blocks(
interactive = True
)
+ with gr.Row(
+ elem_id = "add_seed_into_pipe_field_row_id",
+ visible = default_add_seed_into_pipe_field_row_visibility
+ ):
+
+ add_seed_into_pipe_field = gr.Checkbox(
+ label = "Add seed to generation (to make it deterministic)",
+ value = default_add_seed_into_pipe_is_selected,
+ interactive = True,
+ container = True
+ )
+
with gr.Column(scale = 1):
- with gr.Row():
+ image_field_visibility = True
+ image_gallery_field_visibility = False
- if use_image_gallery == 1:
+ if use_image_gallery == 1:
- show_download_button = False
+ image_field_visibility = False
+ image_gallery_field_visibility = True
- if show_download_button_for_gallery == 1:
+ with gr.Row(
+ visible = image_field_visibility
+ ):
- show_download_button = True
+ output_image_field = gr.Image(
+ label = "Generated Image",
+ type = "pil"
+ )
- output_image_field = gr.Gallery(
- label = "Generated Images",
- value = [],
-# columns = 1,
-# rows = 1,
- selected_index = 0,
- elem_id = "image_gallery",
- allow_preview = "True",
- preview = True,
- show_download_button = show_download_button
- )
+ with gr.Row(
+ visible = image_gallery_field_visibility
+ ):
- else:
+ show_download_button = False
- output_image_field = gr.Image(
- label = "Generated Image",
- type = "pil"
- )
+ show_download_button = True
+
+ output_image_gallery_field = gr.Gallery(
+ elem_id = "image_gallery_id",
+ label = "Generated Images",
+ value = [],
+ selected_index = 0,
+ allow_preview = "True",
+ preview = True,
+ show_download_button = show_download_button
+ )
with gr.Row():
@@ -3485,22 +3760,7 @@ with gr.Blocks(
show_copy_button = True,
lines = 10,
max_lines = 20,
- every = None#,
- #container = False
- )
-
- with gr.Row():
-
- log_text_field = gr.Textbox(
- label = "Generation Progress:",
-
- elem_id = "generation_progress_id",
- elem_classes = "",
-
- interactive = False,
- value = "",
- show_copy_button = False,
- visible = False
+ container = True
)
with gr.Row():
@@ -3517,73 +3777,375 @@ with gr.Blocks(
variant = "stop"
)
- gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again to create more images.")
+ gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again, and then likely refresh the page, to create more images.")
if len(ending_html) > 0:
with gr.Accordion(
- label = "Information",
elem_id = "information_section_id",
+ label = "Information",
open = True
):
gr.Markdown(ending_html)
+
+
+ #####################
+ #
+ # Update Refiner and Upscaler Status Function for Javascript
+ #
+ # When the refiner or upscaler is turned on or off, a text message is
+ # printed on the page. That needs to be updated.
+ #
+ #####################
+
+ update_refiner_and_upscaler_status_function_js = """
+
+function updateRefinerAndUpscalerStatus(
+ baseModelFieldFullNameValue,
+ refiningSelectionDefaultConfigFieldValue,
+ refiningSelectionOnlineConfigNormalFieldValue,
+ refiningSelectionOnlineConfigAutomaticallySelectedFieldValue,
+ upscalingSelectionFieldValue
+) {{
+ "use strict";
+
+ var baseModelNamesObject = {0};
+ var modelConfigurationNamesObject = {1};
+ var onlineConfigurationsObject = {2};
+ var modelConfigurationForceRefinerObject = {3};
+
+ var refinerOnText = "{4}";
+ var refinerOffText = "{5}";
+ var upscalerOnText = "{6}";
+ var upscalerOffText = "{7}";
+
+ var baseModelFullNamesToBaseModelIdConversion = {{}};
+ Object.keys(baseModelNamesObject).forEach(key => {{
+ baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key;
+ }});
+ var baseModelFieldValue = "";
+ if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{
+ baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue];
+ }}
+
+ var modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue];
+
+ var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}};
+ Object.keys(modelConfigurationNamesObject).forEach(key => {{
+ modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key;
+ }});
+ var modelConfigurationNameValue = "";
+ if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{
+ modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue];
+ }}
+
+ var refinerAndUpscalerInfoMessageHtml = "";
+
+ if (
+ baseModelFieldValue &&
+ modelConfigurationNameValue
+ ) {{
+
+ var isDefaultConfigState = 1;
+
+ if (onlineConfigurationsObject[modelConfigurationNameValue]) {{
+
+ isDefaultConfigState = 0;
+
+ }}
+
+ if (
+ (
+ (isDefaultConfigState === 1) &&
+ (refiningSelectionDefaultConfigFieldValue === "Yes")
+ ) || (
+ (isDefaultConfigState !== 1) &&
+ (
+ (
+ (!Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) &&
+ (refiningSelectionOnlineConfigNormalFieldValue === "Yes")
+ ) || (
+ (Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) &&
+ (refiningSelectionOnlineConfigAutomaticallySelectedFieldValue === "Yes")
+ )
+ )
+ )
+ ) {{
+
+ refinerAndUpscalerInfoMessageHtml += refinerOnText;
+
+ }}
+ else {{
+
+ refinerAndUpscalerInfoMessageHtml += refinerOffText;
+
+ }}
+
+ if (upscalingSelectionFieldValue === "Yes") {{
+
+ refinerAndUpscalerInfoMessageHtml += upscalerOnText;
+
+ }}
+ else {{
+
+ refinerAndUpscalerInfoMessageHtml += upscalerOffText;
+
+ }}
+
+ }}
+
+ document.getElementById("refiner_and_upscaler_info_message_div_id").innerHTML = refinerAndUpscalerInfoMessageHtml;
+
+}}
+
+""".format(
+ base_model_names_object,
+ model_configuration_names_object,
+ online_configurations_object,
+ model_configuration_force_refiner_object,
+ refiner_on_text,
+ refiner_off_text,
+ upscaler_on_text,
+ upscaler_off_text
+)
+
+
+
+ #####################
+ #
+ # Model Change Function for Javascript
+ #
+ # When the base model or model configuration is changed, we may need
+ # to show and hide certain fields.
+ #
+ #####################
+
+ model_change_function_js = """
+
+function modelChange(
+ baseModelFieldFullNameValue,
+ possiblyModelConfigurationFullNameValue
+) {{
+ "use strict";
+
+ var baseModelNamesObject = {0};
+ var modelConfigurationNamesObject = {1};
+
+ var baseModelArray = {2};
+
+ var onlineConfigurationsObject = {3};
+ var modelConfigurationForceRefinerObject = {4};
+ var modelConfigurationIncludeRefinerNumberOfStepsObject = {5};
+
+ var allowOnlineConfigurations = {6};
+
+ var baseModelFullNamesToBaseModelIdConversion = {{}};
+ Object.keys(baseModelNamesObject).forEach(key => {{
+ baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key;
+ }});
+ var baseModelFieldValue = "";
+ if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{
+ baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue];
+ }}
+
+ var modelConfigurationFullNameValue = "";
+
+ var isBaseModelDropdownChange = 0;
+
+ if (baseModelFieldFullNameValue === possiblyModelConfigurationFullNameValue) {{
+
+ isBaseModelDropdownChange = 1;
+
+ modelConfigurationFullNameValue = window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue];
+
+ }}
+ else {{
+
+ modelConfigurationFullNameValue = possiblyModelConfigurationFullNameValue;
+
+ window.modelConfigurationDropdownFieldValuesObject[baseModelFieldValue] = modelConfigurationFullNameValue;
+
+ }}
+
+ var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}};
+ Object.keys(modelConfigurationNamesObject).forEach(key => {{
+ modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key;
+ }});
+ var modelConfigurationNameValue = "";
+ if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{
+ modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue];
+ }}
+
+ for (var thisBaseModel of baseModelArray) {{
+
+ var thisModelConfigurationElementId = "model_configuration_field_" + thisBaseModel + "_row_id";
+
+ var thisModelConfigurationElementDisplay = "none";
+
+ if (
+ (thisBaseModel === baseModelFieldValue) &&
+ (allowOnlineConfigurations === 1)
+ ) {{
+
+ thisModelConfigurationElementDisplay = "block";
+
+ }}
+
+ document.getElementById(thisModelConfigurationElementId).style.display = thisModelConfigurationElementDisplay;
+
+ }}
+
+ var modelConfigurationFullNamesToModelConfigurationIdConversion = {{}};
+ Object.keys(modelConfigurationNamesObject).forEach(key => {{
+ modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationNamesObject[key]] = key;
+ }});
+ var modelConfigurationNameValue = "";
+ if (modelConfigurationFullNamesToModelConfigurationIdConversion.hasOwnProperty(modelConfigurationFullNameValue)) {{
+ modelConfigurationNameValue = modelConfigurationFullNamesToModelConfigurationIdConversion[modelConfigurationFullNameValue];
+ }}
+
+ if (
+ baseModelFieldValue &&
+ modelConfigurationNameValue
+ ) {{
+
+ var isDefaultConfigState = 1;
+
+ if (onlineConfigurationsObject[modelConfigurationNameValue]) {{
+
+ isDefaultConfigState = 0;
+
+ }}
+
+ var negativePromptFieldDisplay = "block";
+ var negativePromptForSdxlTurboFieldDisplay = "none";
+ var baseModelNumInferenceStepsFieldDisplay = "block";
+ var baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "none";
+ var guidanceScaleFieldDisplay = "block";
+ var guidanceScaleForSdxlTurboFieldDisplay = "none";
+
+ if (baseModelFieldValue === "sdxl_turbo") {{
+
+ negativePromptFieldDisplay = "none";
+ negativePromptForSdxlTurboFieldDisplay = "block";
+ baseModelNumInferenceStepsFieldDisplay = "none";
+ baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay = "block";
+ guidanceScaleFieldDisplay = "none";
+ guidanceScaleForSdxlTurboFieldDisplay = "block";
+
+ }}
+
+ document.getElementById("negative_prompt_field_row_id").style.display = negativePromptFieldDisplay;
+ document.getElementById("negative_prompt_for_sdxl_turbo_field_row_id").style.display = negativePromptForSdxlTurboFieldDisplay;
+ document.getElementById("base_model_num_inference_steps_field_row_id").style.display = baseModelNumInferenceStepsFieldDisplay;
+ document.getElementById("base_model_num_inference_steps_field_for_sdxl_turbo_field_row_id").style.display = baseModelNumInferenceStepsFieldForSdxlTurboFieldDisplay;
+ document.getElementById("guidance_scale_field_row_id").style.display = guidanceScaleFieldDisplay;
+ document.getElementById("guidance_scale_for_sdxl_turbo_field_row_id").style.display = guidanceScaleForSdxlTurboFieldDisplay;
+
+ var refinerDefaultConfigAccordionDisplay = "none";
+ var refinerOnlineConfigAccordionDisplay = "block";
+
+ var addSeedIntoPipeFieldDisplay = "none";
+
+ if (isDefaultConfigState === 1) {{
+
+ refinerDefaultConfigAccordionDisplay = "block";
+ refinerOnlineConfigAccordionDisplay = "none";
+
+ addSeedIntoPipeFieldDisplay = "block";
+
+ }}
+
+ document.getElementById("refiner_default_config_accordion_id").style.display = refinerDefaultConfigAccordionDisplay;
+ document.getElementById("refiner_online_config_accordion_id").style.display = refinerOnlineConfigAccordionDisplay;
+
+ document.getElementById("add_seed_into_pipe_field_row_id").style.display = addSeedIntoPipeFieldDisplay;
+
+ var refiningSelectionAutomaticallySelectedMessageFieldDisplay = "none";
+
+ var refiningSelectionOnlineConfigNormalFieldDisplay = "block";
+ var refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay = "none";
+
+ if (Object.keys(modelConfigurationForceRefinerObject).includes(modelConfigurationNameValue)) {{
+
+ refiningSelectionAutomaticallySelectedMessageFieldDisplay = "block";
+
+ refiningSelectionOnlineConfigNormalFieldDisplay = "none";
+ refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay = "block";
+
+ }}
+
+ var refiningNumberOfIterationsForOnlineConfigFieldDisplay = "none";
+
+ if (Object.keys(modelConfigurationIncludeRefinerNumberOfStepsObject).includes(modelConfigurationNameValue)) {{
+
+ refiningNumberOfIterationsForOnlineConfigFieldDisplay = "block";
+
+ }}
+
+ document.getElementById("refining_selection_automatically_selected_message_field_id").style.display = refiningSelectionAutomaticallySelectedMessageFieldDisplay;
+ document.getElementById("refining_selection_online_config_normal_field_id").style.display = refiningSelectionOnlineConfigNormalFieldDisplay;
+ document.getElementById("refining_selection_online_config_automatically_selected_field_id").style.display = refiningSelectionOnlineConfigAutomaticallySelectedFieldDisplay;
+ document.getElementById("refining_number_of_iterations_for_online_config_field_row_id").style.display = refiningNumberOfIterationsForOnlineConfigFieldDisplay;
+
+ }}
+
+}}
+
+""".format(
+ base_model_names_object,
+ model_configuration_names_object,
+ base_model_array,
+ online_configurations_object,
+ model_configuration_force_refiner_object,
+ model_configuration_include_refiner_number_of_steps_object,
+ allow_online_configurations
+)
+
+
+
base_model_field.change(
- fn = base_model_field_update_function,
+ fn = None,
inputs = [
base_model_field
],
- outputs = [
- model_configuration_field,
- negative_prompt_field,
- negative_prompt_for_sdxl_turbo_field,
- base_model_num_inference_steps_field,
- base_model_num_inference_steps_field_for_sdxl_turbo_field,
- guidance_scale_field,
- guidance_scale_for_sdxl_turbo_field
- ],
- queue = None,
- show_progress = "hidden"
+ outputs = None,
+ js = model_change_function_js
)
- model_configuration_field.change(
- fn = model_configuration_field_update_function,
+
+
+ for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array:
+
+ this_model_configuration_dropdown_field.change(
+ fn = None,
+ inputs = [
+ base_model_field,
+ this_model_configuration_dropdown_field
+ ],
+ outputs = None,
+ js = model_change_function_js
+ )
+
+
+
+ output_image_gallery_field.select(
+ fn = update_prompt_info_from_gallery,
inputs = [
- base_model_field,
- model_configuration_field
+ prompt_information_array_state
],
outputs = [
- negative_prompt_field,
- negative_prompt_for_sdxl_turbo_field,
- base_model_num_inference_steps_field,
- base_model_num_inference_steps_field_for_sdxl_turbo_field,
- guidance_scale_field,
- guidance_scale_for_sdxl_turbo_field,
- refiner_default_config_accordion,
- refiner_online_config_accordion,
- refining_selection_automatically_selected_message_field,
- refining_selection_online_config_normal_field,
- refining_selection_online_config_automatically_selected_field,
- refining_number_of_iterations_for_online_config_field
+ output_image_gallery_field,
+ output_text_field
],
- queue = None,
show_progress = "hidden"
)
- if use_image_gallery == 1:
- output_image_field.select(
- fn = update_prompt_info_from_gallery,
- inputs = None,
- outputs = [
- output_image_field,
- output_text_field
- ],
- show_progress = "hidden"
- )
if (
(enable_refiner == 1) or
@@ -3595,12 +4157,18 @@ with gr.Blocks(
if enable_refiner == 1:
triggers_array.extend([
+ base_model_field.change,
refining_selection_default_config_field.change,
refining_selection_online_config_normal_field.change,
- refining_selection_online_config_automatically_selected_field.change,
- model_configuration_field.change
+ refining_selection_online_config_automatically_selected_field.change
])
+ for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array:
+
+ triggers_array.extend([
+ this_model_configuration_dropdown_field.change
+ ])
+
if enable_upscaler == 1:
triggers_array.extend([
@@ -3609,164 +4177,169 @@ with gr.Blocks(
gr.on(
triggers = triggers_array,
- fn = update_refiner_and_upscaler_status_function,
+ fn = None,
inputs = [
base_model_field,
- model_configuration_field,
refining_selection_default_config_field,
refining_selection_online_config_normal_field,
refining_selection_online_config_automatically_selected_field,
upscaling_selection_field
],
- outputs = [
- refiner_and_upscaler_text_field
- ],
- queue = None,
- show_progress = "hidden"
+ outputs = None,
+ show_progress = "hidden",
+ queue = False,
+ js = update_refiner_and_upscaler_status_function_js
+ )
+
+
+
+ create_image_function_inputs = [
+
+ base_model_field,
+
+ prompt_field,
+ negative_prompt_field,
+
+ scheduler_field,
+
+ image_width_field,
+ image_height_field,
+ guidance_scale_field,
+ base_model_num_inference_steps_field,
+ base_model_num_inference_steps_field_for_sdxl_turbo_field,
+ seed_field,
+ add_seed_into_pipe_field,
+
+ refining_selection_default_config_field,
+ refining_selection_online_config_normal_field,
+ refining_selection_online_config_automatically_selected_field,
+
+ refining_denoise_start_for_default_config_field,
+ refining_use_denoising_start_in_base_model_when_using_refiner_field,
+ refining_base_model_output_to_refiner_is_in_latent_space_field,
+
+ refining_denoise_start_for_online_config_field,
+ refining_number_of_iterations_for_online_config_field,
+
+ upscaling_selection_field,
+ upscaling_num_inference_steps_field,
+
+ image_gallery_array_state,
+ prompt_information_array_state,
+ last_model_configuration_name_selected_state,
+ last_refiner_name_selected_state,
+ last_upscaler_name_selected_state,
+ stored_pipe_state,
+ stored_refiner_state,
+ stored_upscaler_state
+
+ ]
+
+ for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array:
+
+ create_image_function_inputs.append(
+ this_model_configuration_dropdown_field
)
+
+
generate_image_btn_click_event = generate_image_btn.click(
fn = before_create_image_function,
- inputs = [],
+ inputs = None,
outputs = [
generate_image_btn,
output_image_field,
- output_text_field,
- log_text_field#,
-
- #prompt_field,
- #negative_prompt_field,
- #refining_selection_default_config_field,
- #refining_denoise_start_for_default_config_field,
- #refining_use_denoising_start_in_base_model_when_using_refiner_field,
- #refining_base_model_output_to_refiner_is_in_latent_space_field,
- #refining_selection_online_config_normal_field,
- #refining_selection_online_config_automatically_selected_field,
- #refining_denoise_start_for_online_config_field,
- #refining_number_of_iterations_for_online_config_field,
- #upscaling_selection_field,
- #upscaling_num_inference_steps_field,
- #base_model_field,
- #model_configuration_field,
- #image_width_field,
- #image_height_field,
- #base_model_num_inference_steps_field,
- #base_model_num_inference_steps_field_for_sdxl_turbo_field,
- #guidance_scale_field,
- #seed_field
+ output_image_gallery_field,
+ output_text_field
],
- show_progress = "minimal",
+ show_progress = "hidden",
queue = True
).then(
fn = create_image_function,
- inputs = [
- base_model_field,
- model_configuration_field,
- prompt_field,
- negative_prompt_field,
- image_width_field,
- image_height_field,
- guidance_scale_field,
- base_model_num_inference_steps_field,
- base_model_num_inference_steps_field_for_sdxl_turbo_field,
- seed_field,
-
- refining_selection_default_config_field,
- refining_selection_online_config_normal_field,
- refining_selection_online_config_automatically_selected_field,
-
- refining_denoise_start_for_default_config_field,
- refining_use_denoising_start_in_base_model_when_using_refiner_field,
- refining_base_model_output_to_refiner_is_in_latent_space_field,
-
- refining_denoise_start_for_online_config_field,
- refining_number_of_iterations_for_online_config_field,
-
- upscaling_selection_field,
- upscaling_num_inference_steps_field
- ],
+ inputs = create_image_function_inputs,
outputs = [
output_image_field,
+ output_image_gallery_field,
output_text_field,
- prompt_truncated_field
+ prompt_truncated_field,
+ last_model_configuration_name_selected_state,
+ last_refiner_name_selected_state,
+ last_upscaler_name_selected_state,
+ stored_pipe_state,
+ stored_refiner_state,
+ stored_upscaler_state
],
+ show_progress = "full",
queue = True
).then(
fn = after_create_image_function,
- inputs = [],
+ inputs = None,
outputs = [
generate_image_btn,
- output_text_field,
- log_text_field#,
-
- #prompt_field,
- #negative_prompt_field,
- #refining_selection_default_config_field,
- #refining_denoise_start_for_default_config_field,
- #refining_use_denoising_start_in_base_model_when_using_refiner_field,
- #refining_base_model_output_to_refiner_is_in_latent_space_field,
- #refining_selection_online_config_normal_field,
- #refining_selection_online_config_automatically_selected_field,
- #refining_denoise_start_for_online_config_field,
- #refining_number_of_iterations_for_online_config_field,
- #upscaling_selection_field,
- #upscaling_num_inference_steps_field,
- #base_model_field,
- #model_configuration_field,
- #image_width_field,
- #image_height_field,
- #base_model_num_inference_steps_field,
- #base_model_num_inference_steps_field_for_sdxl_turbo_field,
- #guidance_scale_field,
- #seed_field
+ output_text_field
],
- queue = False
+ show_progress = "hidden",
+ queue = True
)
- sd_interface_load_kwargs = {
- "scroll_to_output": False,
- "show_progress": "full"
- }
+ if enable_close_command_prompt_button == 1:
- if show_image_creation_progress_log == 1:
+ # https://github.com/gradio-app/gradio/pull/2433/files
- sd_interface_continuous = sd_interface.load(
- fn = update_log_progress,
+ cancel_image_btn.click(
+ fn = cancel_image_processing,
inputs = None,
- outputs = [
- log_text_field
- ],
- every = 1,
- **sd_interface_load_kwargs
+ outputs = None,
+ cancels = [generate_image_btn_click_event],
+ queue = True
)
- else:
- sd_interface_continuous = sd_interface.load(
- **sd_interface_load_kwargs
- )
+ # Remove last comma
+ model_configuration_dropdown_field_values_for_js = model_configuration_dropdown_field_values_for_js[:-1]
- if enable_close_command_prompt_button == 1:
- # https://github.com/gradio-app/gradio/pull/2433/files
- cancel_image_btn.click(
- fn = cancel_image_processing,
- inputs = None,
- outputs = None,
- cancels = [generate_image_btn_click_event]
- )
+ script_on_load_js = """
+
+function scriptOnLoad() {{
+ "use strict";
+
+ window.modelConfigurationDropdownFieldValuesObject = {{{0}}};
+
+}}
+
+""".format(
+ model_configuration_dropdown_field_values_for_js
+)
+
+
+
+ sd_interface_load_kwargs = {
+ "scroll_to_output": False,
+ "show_progress": "full"
+ }
+
+ sd_interface_continuous = sd_interface.load(
+ fn = None,
+ inputs = None,
+ outputs = None,
+ js = script_on_load_js,
+ **sd_interface_load_kwargs
+ )
sd_interface.queue(
- max_size = 20
+ max_size = max_queue_size
)
+
+
inbrowser = False
if auto_open_browser == 1:
@@ -3775,10 +4348,10 @@ if auto_open_browser == 1:
sd_interface.launch(
inbrowser = inbrowser,
-# debug = True,
share = None,
show_api = False,
quiet = True,
show_error = True,
- max_threads = 1
+ state_session_capacity = 10000,
+ max_threads = 40
)