diff --git "a/spaghetti_ai_script.py" "b/spaghetti_ai_script.py" --- "a/spaghetti_ai_script.py" +++ "b/spaghetti_ai_script.py" @@ -201,8 +201,12 @@ use_safety_checker = 0 # You can automatically save the image file, and a text file with the # prompt details. # +# If you cancel an image in progress, you can choose whether to save +# images like that. +# auto_save_imagery = 1 +save_canceled_images = 0 ##### @@ -218,37 +222,49 @@ auto_save_imagery = 1 saved_images_folder_name = "saved_images" -#################### +##### # -# Auto Open Browser From Command Prompt +# Use Custom Temporary Files Folder +# +# Rather than saving temporary files here: +# %USERPROFILE%/AppData/Local/Temp/gradio +# You can keep them in the folder "gradio_temporary_files" that is inside +# "main_dir". However, a negative would be that your computer would not +# clear out the files like they might if they were stored in that Windows +# directory. # -auto_open_browser = 0 +use_custom_temporary_files_folder = 1 -#################### +##### # -# Allow Image Generation Cancellation +# Name of Gradio Temporary Files Folder # -# This allows canceling the image generation. It will not stop -# immediately. It will stop after completing the current step it is on. +# You can change the name of this folder if you want. # -enable_image_generation_cancellation = 1 +gradio_temporary_files_folder_name = "gradio_temporary_files" #################### # -# Include Close Command Prompt / Cancel Button +# Auto Open Browser From Command Prompt +# + +auto_open_browser = 0 + +#################### + # -# This will likely be removed in the future. +# Allow Image Generation Cancellation # -# This doesn't work well at all. It just closes the command prompt. I -# might remove this eventually. +# This allows canceling the image generation. It will not stop +# immediately. It will stop after completing the current step it is on. # -enable_close_command_prompt_button = 0 +enable_image_generation_cancellation = 1 #################### @@ -367,13 +383,13 @@ default_use_torch_manual_seed_but_do_not_add_to_pipe = 0 #################### # -# Save Base Image When Using Refiner +# Save Base Image When Using Refiner or Upscaler # # The image will be shown on the page. If you save images automatically, # it will also be saved. # -default_save_base_image_when_using_refiner = 1 +default_save_base_image_when_using_refiner_or_upscaler = 1 #################### @@ -411,10 +427,40 @@ allow_other_model_versions = 1 # Image Preview # -use_image_preview = 1 -image_preview_step_interval = 10 -image_preview_seconds_interval = 60 +enable_image_preview = 1 +image_preview_step_interval = 5 #10 +image_preview_seconds_interval = 30 load_image_preview_frequency_in_seconds = 2 +delete_preview_images_immediately = 1 + +default_create_preview_images = 1 # 1 +default_do_not_create_refining_preview_images = 0 # 0 +default_do_not_create_upscaling_preview_images = 0 # 1 + +#################### + +# +# Allow Longer Prompts for Stable Diffusion 1.5 Based Models +# + +enable_longer_prompts = 1 + +allow_longer_prompts_for_sd_1_5_based_models = 1 + +base_models_supporting_special_long_prompt_method_object = { + "photoreal": 1, + "sd_1_5_runwayml": 1 +} + +#################### + +# +# PYTORCH_CUDA_ALLOC_CONF +# +# https://pytorch.org/docs/stable/notes/cuda.html +# + +pytorch_cuda_alloc_conf_max_split_size_mb = 6000 # 8000 #################### @@ -469,8 +515,9 @@ base_model_names_object = { # Number of steps in upscaler was 5 during this period. # There were changes on this date. 
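# A minimal sketch (not part of this patch) of how a setting like
# pytorch_cuda_alloc_conf_max_split_size_mb above actually takes effect.
# PyTorch reads PYTORCH_CUDA_ALLOC_CONF from the environment, so it has to
# be set before the first CUDA allocation:

import os

# "option:value" is the format PyTorch expects for this variable.
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:6000"

import torch  # imported only after the environment variable is in place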
# -# http://127.0.0.1:7860/?model=sdxl&model_config=sdxl_1-0&scheduler=model_default&width=768&height=768&guidance=10&steps=50&prompt=&neg_prompt=&seed=&add_seed=yes&refiner=yes&denoise_start=0.95&refiner_steps=100&use_denoise_end=no&latent_space_before_refiner=yes&upscaler=no&upscaler_steps=15 +# http://127.0.0.1:7860/?model=sdxl&model_config=sdxl_1-0&scheduler=model_default&width=768&height=768&guidance=10&steps=50&seed=&add_seed=yes&refiner=yes&denoise_start=0.95&refiner_steps=100&use_denoise_end=no&latent_space_before_refiner=yes&upscaler=no&upscaler_steps=15&prompt=&neg_prompt= # + ########## # # PhotoReal 3.8.1 @@ -478,11 +525,8 @@ base_model_names_object = { # - Valid from December 29th to present. # Base model was changed from 3.7.5 to 3.8.1: "circulus/canvers-real-v3.8.1" # -# For some reason, I haven't been able to match this version of the -# configuration yet. That means this link doesn't work: -# -# http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-8-1&scheduler=model_default&width=768&height=768&guidance=5&steps=50&prompt=&neg_prompt=&seed=&add_seed=no&use_torch_manual_seed_but_not_in_generator=yes&refiner=no&denoise_start=0.95&refiner_steps=&use_denoise_end=no&latent_space_before_refiner=no&upscaler=no&upscaler_steps= -# +# http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-8-1&scheduler=model_default&width=768&height=768&guidance=5&steps=50&seed=&add_seed=no&use_torch_manual_seed_but_not_in_generator=yes&refiner=no&denoise_start=0.95&use_denoise_end=no&latent_space_before_refiner=no&upscaler=no&prompt=&neg_prompt= + #################### # # PhotoReal 3.7.5 @@ -490,7 +534,7 @@ base_model_names_object = { # - Valid from November 12th to December 29th. # Base model was changed from 3.6 to 3.7.5: "circulus/canvers-real-v3.7.5" # -# http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-7-5&scheduler=model_default&width=768&height=768&guidance=7&steps=50&prompt=&neg_prompt=&seed=&add_seed=no&use_torch_manual_seed_but_not_in_generator=yes&refiner=no&denoise_start=0.95&refiner_steps=&use_denoise_end=no&latent_space_before_refiner=no&upscaler=no&upscaler_steps= +# http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-7-5&scheduler=model_default&width=768&height=768&guidance=7&steps=50&seed=&add_seed=no&use_torch_manual_seed_but_not_in_generator=yes&refiner=no&denoise_start=0.95&use_denoise_end=no&latent_space_before_refiner=no&upscaler=no&prompt=&neg_prompt= # #################### # @@ -500,7 +544,7 @@ base_model_names_object = { # "circulus/canvers-realistic-v3.6" was already in effect. # But there were changes on this date. 
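# The links above store every generation setting as a query-string
# parameter. A minimal sketch (illustrative names, not the script's own
# get_query_params) of reading such a link back:

import urllib.parse

link = "http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-7-5&guidance=7&steps=50&seed=&add_seed=no"

query_string = urllib.parse.urlparse(link).query
url_object = {key: values[0] for key, values in urllib.parse.parse_qs(query_string).items()}

print(url_object["model"])       # "photoreal"
print(int(url_object["steps"]))  # 50
print("seed" in url_object)      # False: parse_qs drops empty values like "seed="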
# -# http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-6&scheduler=model_default&width=768&height=768&guidance=7&steps=50&prompt=&neg_prompt=&seed=&add_seed=no&use_torch_manual_seed_but_not_in_generator=yes&refiner=no&denoise_start=0.95&refiner_steps=&use_denoise_end=no&latent_space_before_refiner=no&upscaler=no&upscaler_steps= +# http://127.0.0.1:7860/?model=photoreal&model_config=photoreal_3-6&scheduler=model_default&width=768&height=768&guidance=7&steps=50&seed=&add_seed=no&use_torch_manual_seed_but_not_in_generator=yes&refiner=no&denoise_start=0.95&use_denoise_end=no&latent_space_before_refiner=no&upscaler=no&prompt=&neg_prompt= # #################### @@ -560,6 +604,11 @@ base_models_not_supporting_denoising_end_for_base_model_object = { "sd_1_5_runwayml": 1 } +base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object = { + "photoreal": 1, + "sd_1_5_runwayml": 1 +} + #################### hugging_face_refiner_partial_path = "stabilityai/stable-diffusion-xl-refiner-1.0" @@ -726,7 +775,7 @@ if torch.cuda.is_available(): device = "cuda" PYTORCH_CUDA_ALLOC_CONF = { - "max_split_size_mb": 8000 + "max_split_size_mb": pytorch_cuda_alloc_conf_max_split_size_mb } torch.cuda.max_memory_allocated( device = device @@ -844,7 +893,7 @@ maximum_refining_steps_for_online_config_field = 100 # Upscaler Options maximum_upscaler_steps = 150 -default_upscaler_steps = 50 +default_upscaler_steps = 15 @@ -916,7 +965,8 @@ width_and_height_input_slider_steps = 8 -maximum_prompt_characer_count = 1000 +maximum_prompt_characer_count = 1250 +maximum_neg_prompt_characer_count = 1000 @@ -941,7 +991,15 @@ if device == "cpu": ending_html += """Spaghetti AI Logo -Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions. +Tokens are not individual characters. If the prompt length is too long, the display will notify you what part of the prompt wasn't used. Changing just the image dimensions alone will change the image generated. For some models, trying to make a large image, such as 1024x1024, may add extra people and come out worse than using smaller dimensions.""" + +if enable_longer_prompts == 1: + + ending_html += """ + +Longer prompts for Stable Diffusion 1.5 and PhotoReal are available using the method here. Another method will likely eventually be added for SDXL models.""" + +ending_html += """ The original script for this app was written by Manjushri.""" @@ -967,6 +1025,7 @@ cancel_image_button_in_progress_text = "Cancelling..." 
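# The *_object dictionaries above act as lookup sets keyed by base model
# name. A sketch of the membership test they are intended for; direct
# indexing raises a KeyError for any model that is not listed, so "in" or
# .get() is the safe form:

base_models_not_supporting_example_object = {
    "photoreal": 1,
    "sd_1_5_runwayml": 1
}

base_model_name_value = "sdxl"

if base_models_not_supporting_example_object.get(base_model_name_value, 0) == 1:
    print("Base model output must be forced to a PIL image for this model.")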
gradio_image_component_height = 300 gradio_image_gallery_component_height = 350 +gradio_extra_image_component_height = 250 @@ -981,6 +1040,7 @@ download_data_key_in_url = "download_data" model_configuration_key_in_url = "model_config" prompt_field_key_in_url = "prompt" negative_prompt_field_key_in_url = "neg_prompt" +allow_longer_prompts_for_sd_1_5_based_models_key_in_url = "longer_prompt_for_sd_1_5" scheduler_field_key_in_url = "scheduler" image_width_field_key_in_url = "width" image_height_field_key_in_url = "height" @@ -997,11 +1057,24 @@ use_denoising_start_in_base_model_when_using_refiner_key_in_url = "use_denoise_e base_model_output_to_refiner_is_in_latent_space_key_in_url = "latent_space_before_refiner" upscaler_key_in_url = "upscaler" upscaling_steps_key_in_url = "upscaler_steps" +show_base_image_when_using_refiner_or_upscaler_key_in_url = "show_base_image_when_using_refiner_or_upscaler" +show_refined_image_when_using_upscaler_key_in_url = "show_refined_image_when_using_upscaler" +create_preview_images_key_in_url = "do_preview" +do_not_create_refining_preview_images_key_in_url = "no_refining_preview" +do_not_create_upscaling_preview_images_key_in_url = "do_upscaling_preview" theme_key_in_url = "theme" special_theme_key_in_url = "__theme" +prompt_textbox_label_with_length_limit = "Prompt (77 token limit):" +prompt_textbox_label_with_no_length_limit = "Prompt:" + +negative_prompt_textbox_label_with_length_limit = "Negative Prompt (77 token limit):" +negative_prompt_textbox_label_with_no_length_limit = "Negative Prompt:" + + + ############################################################################### ############################################################################### # @@ -1184,6 +1257,19 @@ if auto_save_imagery == 1: +gradio_temporary_files_dir = os.environ.get("GRADIO_TEMP_DIR") + +if use_custom_temporary_files_folder == 1: + + gradio_temporary_files_dir = main_dir + "/" + gradio_temporary_files_folder_name + + if not os.path.exists(gradio_temporary_files_dir): + os.makedirs(gradio_temporary_files_dir) + + os.environ["GRADIO_TEMP_DIR"] = gradio_temporary_files_dir + + + if device == "cpu": use_sequential_cpu_offload_for_base_model = 0 @@ -1346,6 +1432,18 @@ if default_base_model_output_to_refiner_is_in_latent_space == 1: +default_base_model_output_in_latent_space_note_field_row_visibility = False + +if ( + (default_refiner_selected == 1) and + base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object[default_base_model] and + (default_base_model_output_to_refiner_is_in_latent_space == 1) +): + + default_base_model_output_in_latent_space_note_field_row_visibility = True + + + refiner_accordion_visible = True if enable_refiner != 1: @@ -1435,6 +1533,28 @@ if default_dark_theme == 1: +default_allow_longer_prompts_row_visibility = True + +if enable_longer_prompts == 0: + + allow_longer_prompts_for_sd_1_5_based_models = 0 + + default_allow_longer_prompts_row_visibility = False + +default_allow_longer_prompts_for_sd_1_5_based_models_is_selected = False + +prompt_textbox_label_to_use = prompt_textbox_label_with_length_limit +negative_prompt_textbox_label_to_use = prompt_textbox_label_with_length_limit + +if allow_longer_prompts_for_sd_1_5_based_models == 1: + + default_allow_longer_prompts_for_sd_1_5_based_models_is_selected = True + + prompt_textbox_label_to_use = prompt_textbox_label_with_no_length_limit + negative_prompt_textbox_label_to_use = prompt_textbox_label_with_no_length_limit + + + default_add_seed_into_pipe_is_selected = False if 
default_add_seed_into_pipe == 1: @@ -1451,17 +1571,41 @@ if default_use_torch_manual_seed_but_do_not_add_to_pipe == 1: -default_save_base_image_when_using_refiner_is_selected = False +default_save_base_image_when_using_refiner_or_upscaler_is_selected = False + +if default_save_base_image_when_using_refiner_or_upscaler == 1: + + default_save_base_image_when_using_refiner_or_upscaler_is_selected = True + + + +default_create_preview_images_is_selected = False + +if default_create_preview_images == 1: + + default_create_preview_images_is_selected = True + + + +default_do_not_create_refining_preview_images_is_selected = False + +if default_do_not_create_refining_preview_images == 1: + + default_do_not_create_refining_preview_images_is_selected = True + -if default_save_base_image_when_using_refiner == 1: - default_save_base_image_when_using_refiner_is_selected = True +default_do_not_create_upscaling_preview_images_is_selected = False + +if default_do_not_create_upscaling_preview_images == 1: + + default_do_not_create_upscaling_preview_images_is_selected = True default_save_refined_image_when_using_upscaler_is_selected = False -if default_save_base_image_when_using_refiner == 1: +if default_save_base_image_when_using_refiner_or_upscaler == 1: default_save_refined_image_when_using_upscaler_is_selected = True @@ -1502,6 +1646,7 @@ if maximum_seed <= 9007199254740992: current_preview_image = "" +previous_preview_image = None current_preview_image_user_id = 0 @@ -1650,6 +1795,7 @@ def show_message( def nice_elapsed_time( seconds ): + # Google AI Code hours = seconds // 3600 @@ -1776,7 +1922,7 @@ def prompt_valid(prompt_field): def negative_prompt_valid(negative_prompt_field): try: negative_prompt_field_str = str(negative_prompt_field) - if len(negative_prompt_field_str) <= maximum_prompt_characer_count: + if len(negative_prompt_field_str) <= maximum_neg_prompt_characer_count: return True else: return False @@ -1975,14 +2121,12 @@ def refiner_denoise_start_valid( ): try: refiner_denoise_start_num = float(refiner_denoise_start_str) - refiner_denoise_start_num_times_100 = (refiner_denoise_start_num * 100) - refiner_denoise_start_num_times_100_with_int = int(refiner_denoise_start_num_times_100) - refiner_denoise_start_input_slider_steps_times_100 = (float(refiner_denoise_start_input_slider_steps) * 100) + refiner_denoise_start_rounded = rounded_number(refiner_denoise_start_str, 2) + if ( (refiner_denoise_start_num >= float(minimum_refiner_denoise_start)) and (refiner_denoise_start_num <= float(maximum_refiner_denoise_start)) and - (refiner_denoise_start_num_times_100 == refiner_denoise_start_num_times_100_with_int) and - ((refiner_denoise_start_num_times_100 % refiner_denoise_start_input_slider_steps_times_100) == 0) + (int(refiner_denoise_start_rounded % float(refiner_denoise_start_input_slider_steps)) == 0) ): return True else: @@ -2698,6 +2842,7 @@ def create_image_generation_information ( prompt_text_not_used_substring, negative_prompt_text, negative_prompt_text_not_used_substring, + allow_longer_prompts_for_sd_1_5_based_models_field_value, image_width, image_height, actual_seed, @@ -2783,9 +2928,17 @@ def create_image_generation_information ( "Upscaled Dimensions: " + str(upscaled_image_width) + "x" + str(upscaled_image_height) + " px" ]) - info_about_prompt_lines_array.extend([ - "Seed: " + str(actual_seed) - ]) + if ( + (add_seed_into_pipe == 1) or + ( + (add_seed_into_pipe != 1) and + (use_torch_manual_seed_but_do_not_add_to_pipe_field_value == 1) + ) + ): + + 
info_about_prompt_lines_array.extend([ + "Seed: " + str(actual_seed) + ]) nice_seed_added_to_generation = "No" @@ -2837,7 +2990,16 @@ def create_image_generation_information ( nice_scheduler_name += " (model default)" info_about_prompt_lines_array.extend([ - "Model: " + nice_model_name, + "Model: " + nice_model_name + ]) + + if allow_longer_prompts_for_sd_1_5_based_models_field_value == 1: + + info_about_prompt_lines_array.extend([ + "Longer prompt allowed using special method? Yes" + ]) + + info_about_prompt_lines_array.extend([ "Scheduler/Sampler: " + nice_scheduler_name ]) @@ -3022,7 +3184,7 @@ def load_image_preview ( user_id_state_value = user_id_state.value -# print ("Image preview check run", current_preview_image_user_id, user_id_state_value) +# print ("Image preview check run") if ( (user_id_state_value > 0) and @@ -3053,7 +3215,9 @@ def load_image_preview ( # ##################### -def before_create_image_function (): +def before_create_image_function ( + create_preview_images +): generate_image_button_update = gr.Button( value = generate_image_button_in_progress_text, @@ -3061,6 +3225,26 @@ def before_create_image_function (): interactive = False ) + + + output_base_model_image_field_accordion_update = gr.Accordion( + visible = False + ) + + output_base_model_image_field_update = gr.Image( + value = None + ) + + output_refiner_image_field_accordion_update = gr.Accordion( + visible = False + ) + + output_refiner_image_field_update = gr.Image( + value = None + ) + + + output_text_field_update = gr.Textbox( visible = False ) @@ -3081,11 +3265,14 @@ def before_create_image_function (): value = "" ) - error_text_field_update = gr.HTML( - value = "", + error_text_field_accordion_update = gr.Accordion( visible = False ) + error_text_field_update = gr.HTML( + value = "" + ) + image_generation_id = int(random.randrange(0, 1000000000)) @@ -3098,32 +3285,43 @@ def before_create_image_function (): before_create_image_object = { generate_image_button: generate_image_button_update, + output_base_model_image_field_accordion: output_base_model_image_field_accordion_update, + output_base_model_image_field: output_base_model_image_field_update, + output_refiner_image_field_accordion: output_refiner_image_field_accordion_update, + output_refiner_image_field: output_refiner_image_field_update, output_text_field: output_text_field_update, prompt_truncated_field_group: prompt_truncated_field_group_update, prompt_truncated_field: prompt_truncated_field_update, negative_prompt_truncated_field_group: negative_prompt_truncated_field_group_update, negative_prompt_truncated_field: negative_prompt_truncated_field_update, + error_text_field_accordion: error_text_field_accordion_update, error_text_field: error_text_field_update, image_generation_id_state: image_generation_id_state_update } - if use_image_preview == 1: + if create_preview_images == 1: output_image_field_update = gr.Image( height = 100 ) output_image_gallery_field_update = gr.Gallery( + label = "", height = 100 ) - output_image_preview_field_row_update = gr.Row( + output_image_preview_field_accordion_update = gr.Accordion( visible = True ) + output_image_preview_field_update = gr.Image( + value = None + ) + before_create_image_object.update({ output_image_field: output_image_field_update, - output_image_preview_field_row: output_image_preview_field_row_update + output_image_preview_field_accordion: output_image_preview_field_accordion_update, + output_image_preview_field: output_image_preview_field_update }) if 
enable_image_generation_cancellation == 1: @@ -3174,7 +3372,7 @@ def after_create_image_function (): output_text_field: output_text_field_update } - if use_image_preview == 1: + if enable_image_preview == 1: output_image_field_update = gr.Image( height = gradio_image_component_height @@ -3184,14 +3382,14 @@ def after_create_image_function (): height = gradio_image_gallery_component_height ) - output_image_preview_field_row_update = gr.Row( + output_image_preview_field_accordion_update = gr.Accordion( visible = False ) after_create_image_object.update({ output_image_field: output_image_field_update, output_image_gallery_field: output_image_gallery_field_update, - output_image_preview_field_row: output_image_preview_field_row_update + output_image_preview_field_accordion: output_image_preview_field_accordion_update }) if enable_image_generation_cancellation == 1: @@ -3318,10 +3516,12 @@ def create_image_from_latents ( if ( (model_to_use == "sdxl") or - (model_to_use == "sdxl_turbo") + (model_to_use == "sdxl_turbo") or + (model_to_use == "refiner") ): # https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py + # https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl_img2img.py # make sure the VAE is in float32 mode, as it overflows in float16 needs_upcasting = pipe.vae.dtype == torch.float16 and pipe.vae.config.force_upcast @@ -3361,36 +3561,6 @@ def create_image_from_latents ( return image - elif model_to_use == "upscaler": - - # https://github.com/huggingface/diffusers/blob/main/src/diffusers/pipelines/stable_diffusion/pipeline_stable_diffusion_upscale.py - - # make sure the VAE is in float32 mode, as it overflows in float16 - needs_upcasting = pipe.vae.dtype == torch.float16 and pipe.vae.config.force_upcast - - if needs_upcasting: - pipe.upcast_vae() - - # Ensure latents are always the same type as the VAE - latents = latents.to(next(iter(pipe.vae.post_quant_conv.parameters())).dtype) - image = pipe.vae.decode(latents / pipe.vae.config.scaling_factor, return_dict=False)[0] - - # cast back to fp16 if needed - if needs_upcasting: - pipe.vae.to(dtype=torch.float16) - - if int(is_final_image) == 1: - - # apply watermark if available - if pipe.watermark is not None: - image = pipe.watermark.apply_watermark(image) - - do_denormalize = [True] * image.shape[0] - - image = pipe.image_processor.postprocess(image, output_type="pil", do_denormalize=do_denormalize) - - return image - return "" @@ -3448,6 +3618,62 @@ def create_preview_image ( +##################### +# +# Get Pipeline Embeds +# +# This is used to get longer prompts for Stable Diffusion 1.5 based +# models. 
+# +##################### + +def get_pipeline_embeds( + pipeline, + prompt, + negative_prompt, + device +): + + """ Get pipeline embeds for prompts bigger than the maxlength of the pipe + :param pipeline: + :param prompt: + :param negative_prompt: + :param device: + :return: + """ + max_length = pipeline.tokenizer.model_max_length + + # simple way to determine length of tokens + count_prompt = len(prompt.split(" ")) + count_negative_prompt = len(negative_prompt.split(" ")) + + # create the tensor based on which prompt is longer + if count_prompt >= count_negative_prompt: + input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False).input_ids.to(device) + shape_max_length = input_ids.shape[-1] + negative_ids = pipeline.tokenizer(negative_prompt, truncation=False, padding="max_length", + max_length=shape_max_length, return_tensors="pt").input_ids.to(device) + + else: + negative_ids = pipeline.tokenizer(negative_prompt, return_tensors="pt", truncation=False).input_ids.to(device) + shape_max_length = negative_ids.shape[-1] + input_ids = pipeline.tokenizer(prompt, return_tensors="pt", truncation=False, padding="max_length", + max_length=shape_max_length).input_ids.to(device) + + concat_embeds = [] + neg_embeds = [] + for i in range(0, shape_max_length, max_length): + concat_embeds.append(pipeline.text_encoder(input_ids[:, i: i + max_length])[0]) + neg_embeds.append(pipeline.text_encoder(negative_ids[:, i: i + max_length])[0]) + + return torch.cat(concat_embeds, dim=1), torch.cat(neg_embeds, dim=1) + + + + + + + ##################### # # Create Image Function @@ -3460,6 +3686,7 @@ def create_image_function ( base_model_field_index, prompt_text, negative_prompt_text, + allow_longer_prompts_for_sd_1_5_based_models_field_value, scheduler_index, image_width, image_height, @@ -3485,7 +3712,10 @@ def create_image_function ( stored_pipe_state, stored_refiner_state, stored_upscaler_state, - save_base_image_when_using_refiner_field_value, + create_preview_images, + do_not_create_refining_preview_images, + do_not_create_upscaling_preview_images, + save_base_image_when_using_refiner_or_upscaler_field_value, save_refined_image_when_using_upscaler_field_value, user_id_state, image_generation_id_state, @@ -3500,8 +3730,6 @@ def create_image_function ( current_image_generation_id_in_progress = image_generation_id_state_value - - global cancel_image_generation_ids_object global cancel_image_generation_times_object @@ -3511,6 +3739,10 @@ def create_image_function ( current_preview_image = None current_preview_image_user_id = 0 + error_count = 0 + + error_message_array = [] + if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): @@ -3526,14 +3758,9 @@ def create_image_function ( - image_width = int(image_width) - image_height = int(image_height) guidance_scale = float(guidance_scale) base_model_steps = int(base_model_steps) base_model_steps_field_for_sdxl_turbo = int(base_model_steps_field_for_sdxl_turbo) - actual_seed = int(actual_seed) - refining_denoise_start_field_value = rounded_number(refining_denoise_start_field_value, 2) - refining_steps_for_older_configuration_field_value = int(refining_steps_for_older_configuration_field_value) upscaling_steps = int(upscaling_steps) @@ -3614,30 +3841,48 @@ def create_image_function ( if not steps_valid(base_model_steps, base_model_name_value): error_function("Steps option is not valid.") - if not seed_valid(actual_seed): - error_function("Seed is not valid.") + if actual_seed == "": + actual_seed = 
generate_random_seed() + elif not seed_valid(actual_seed): - add_seed_into_pipe = numerical_bool(add_seed_into_pipe) - use_torch_manual_seed_but_do_not_add_to_pipe_field_value = numerical_bool(use_torch_manual_seed_but_do_not_add_to_pipe_field_value) + error_function("Seed is not valid.") - refining_selection_field_value = numerical_bool(refining_selection_field_value) + image_width = int(image_width) + image_height = int(image_height) + actual_seed = int(actual_seed) + refining_steps_for_older_configuration_field_value = int(refining_steps_for_older_configuration_field_value) + + allow_longer_prompts_for_sd_1_5_based_models_field_value = numerical_bool(allow_longer_prompts_for_sd_1_5_based_models_field_value) + add_seed_into_pipe = numerical_bool(add_seed_into_pipe) + use_torch_manual_seed_but_do_not_add_to_pipe_field_value = numerical_bool(use_torch_manual_seed_but_do_not_add_to_pipe_field_value) + + refining_selection_field_value = numerical_bool(refining_selection_field_value) refining_use_denoising_start_in_base_model_when_using_refiner_field_value = numerical_bool(refining_use_denoising_start_in_base_model_when_using_refiner_field_value) refining_base_model_output_to_refiner_is_in_latent_space_field_value = numerical_bool(refining_base_model_output_to_refiner_is_in_latent_space_field_value) + refining_steps_option_for_older_configuration_field_value = numerical_bool(refining_steps_option_for_older_configuration_field_value) + + use_upscaler = numerical_bool(upscaling_selection_field_value) + create_preview_images = numerical_bool(create_preview_images) + do_not_create_refining_preview_images = numerical_bool(do_not_create_refining_preview_images) + do_not_create_upscaling_preview_images = numerical_bool(do_not_create_upscaling_preview_images) + save_base_image_when_using_refiner_or_upscaler_field_value = numerical_bool(save_base_image_when_using_refiner_or_upscaler_field_value) + save_refined_image_when_using_upscaler_field_value = numerical_bool(save_refined_image_when_using_upscaler_field_value) - refining_steps_option_for_older_configuration_field_value = numerical_bool(refining_steps_option_for_older_configuration_field_value) + if create_preview_images == 0: - use_upscaler = numerical_bool(upscaling_selection_field_value) + do_not_create_refining_preview_images = 1 + do_not_create_upscaling_preview_images = 1 @@ -3645,6 +3890,8 @@ def create_image_function ( num_inference_steps_in_refiner = base_model_steps + base_model_output_forced_to_be_pil_image = 0 + if refining_selection_field_value: use_refiner = 1 @@ -3652,6 +3899,8 @@ def create_image_function ( if not refiner_denoise_start_valid(refining_denoise_start_field_value): error_function("Refiner denoise start is not valid.") + refining_denoise_start_field_value = rounded_number(refining_denoise_start_field_value, 2) + if refining_steps_option_for_older_configuration_field_value == 1: # For older configrations. Doesn't reflect actual number of steps. @@ -3661,10 +3910,44 @@ def create_image_function ( num_inference_steps_in_refiner = refining_steps_for_older_configuration_field_value + if base_model_name_value in base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object: + + if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: + base_model_output_forced_to_be_pil_image = 1 + + error_count += 1 + + error_message_array.extend([ + "The base model output must be a PIL image when using the refiner with the model you have chosen. If it wasn't, the image would not come out properly. 
Despite your selection, that change was made automatically." + ]) + + refining_base_model_output_to_refiner_is_in_latent_space_field_value = 0 + + + + if ( + ( + (allow_longer_prompts_for_sd_1_5_based_models_field_value == 1) and + (base_model_name_value not in base_models_supporting_special_long_prompt_method_object) + ) or + (use_refiner == 1) or + (use_upscaler == 1) + ): + + allow_longer_prompts_for_sd_1_5_based_models_field_value = 0 + + error_count += 1 + + error_message_array.extend([ + "Longer prompts using the method we currently use cannot be done for certain models. It is also not available when using the refiner or upscaler. We disabled longer prompts for your image generation." + ]) + if use_upscaler == 1: + from diffusers import StableDiffusionLatentUpscalePipeline + if not upscaling_steps_valid(upscaling_steps): error_function("Upscaling steps option is not valid.") @@ -3699,6 +3982,9 @@ def create_image_function ( model_configuration_name_value ) + if device == "cuda": + torch.cuda.empty_cache() + last_model_configuration_name_selected_state_value = model_configuration_name_value else: @@ -3728,16 +4014,8 @@ def create_image_function ( - # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning it is - # truncated to 75. This happens automatically, but we want to tell people - # that - - tokenizer = pipe.tokenizer - - max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens - - token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text)) - token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text)) + prompt_text_to_use_inside_pipeline = prompt_text + negative_prompt_text_to_use_inside_pipeline = negative_prompt_text @@ -3760,67 +4038,99 @@ def create_image_function ( prompt_text_not_used_substring = "" negative_prompt_text_not_used_substring = "" - truncated_prompts = 0 + if allow_longer_prompts_for_sd_1_5_based_models_field_value == 1: - partial_prompt_or_negative_prompt_length_too_long_message = "" + # Longer prompts are allowed - if token_length_of_prompt_text > max_token_length_of_model: - - ( - prompt_text, - prompt_text_not_used_substring - ) = truncate_prompt( + prompt_embeds, negative_prompt_embeds = get_pipeline_embeds( pipe, - prompt_text + prompt_text, + negative_prompt_text, + device ) - prompt_truncated_field_group_update = gr.Group( - visible = True - ) + prompt_text_to_use_inside_pipeline = None + negative_prompt_text_to_use_inside_pipeline = None - prompt_truncated_field_update = gr.Textbox( - value = prompt_text_not_used_substring - ) + else: - truncated_prompts += 1 + # Only 77 tokens are allowed in the prompt. 2 are reserved, meaning + # it is truncated to 75. 
This happens automatically, but we want to + # tell people that - partial_prompt_or_negative_prompt_length_too_long_message += "prompt" + prompt_embeds = None + negative_prompt_embeds = None - if token_length_of_negative_prompt_text > max_token_length_of_model: + tokenizer = pipe.tokenizer - ( - negative_prompt_text, - negative_prompt_text_not_used_substring - ) = truncate_prompt( - pipe, - negative_prompt_text - ) + max_token_length_of_model = pipe.tokenizer.model_max_length - number_of_reserved_tokens - negative_prompt_truncated_field_group_update = gr.Group( - visible = True - ) + token_length_of_prompt_text = len(tokenizer.tokenize(prompt_text)) + token_length_of_negative_prompt_text = len(tokenizer.tokenize(negative_prompt_text)) - negative_prompt_truncated_field_update = gr.Textbox( - value = negative_prompt_text_not_used_substring - ) - truncated_prompts += 1 - if truncated_prompts == 2: - partial_prompt_or_negative_prompt_length_too_long_message += " and " + truncated_prompts = 0 + + partial_prompt_or_negative_prompt_length_too_long_message = "" + + if token_length_of_prompt_text > max_token_length_of_model: + + ( + prompt_text, + prompt_text_not_used_substring + ) = truncate_prompt( + pipe, + prompt_text + ) + + prompt_truncated_field_group_update = gr.Group( + visible = True + ) + + prompt_truncated_field_update = gr.Textbox( + value = prompt_text_not_used_substring + ) - partial_prompt_or_negative_prompt_length_too_long_message += "negative prompt" + truncated_prompts += 1 - if len(partial_prompt_or_negative_prompt_length_too_long_message) > 0: + partial_prompt_or_negative_prompt_length_too_long_message += "prompt" - partial_prompt_or_negative_prompt_length_too_long_message += " was" + if token_length_of_negative_prompt_text > max_token_length_of_model: + + ( + negative_prompt_text, + negative_prompt_text_not_used_substring + ) = truncate_prompt( + pipe, + negative_prompt_text + ) - if truncated_prompts == 2: - partial_prompt_or_negative_prompt_length_too_long_message += " were" + negative_prompt_truncated_field_group_update = gr.Group( + visible = True + ) - prompt_or_negative_prompt_length_too_long_message = "Note: Part of your " + partial_prompt_or_negative_prompt_length_too_long_message + " truncated automatically because it was too long." + negative_prompt_truncated_field_update = gr.Textbox( + value = negative_prompt_text_not_used_substring + ) - show_message(prompt_or_negative_prompt_length_too_long_message) + truncated_prompts += 1 + + if truncated_prompts == 2: + partial_prompt_or_negative_prompt_length_too_long_message += " and " + + partial_prompt_or_negative_prompt_length_too_long_message += "negative prompt" + + if len(partial_prompt_or_negative_prompt_length_too_long_message) > 0: + + partial_prompt_or_negative_prompt_length_too_long_message += " was" + + if truncated_prompts == 2: + partial_prompt_or_negative_prompt_length_too_long_message += " were" + + prompt_or_negative_prompt_length_too_long_message = "Note: Part of your " + partial_prompt_or_negative_prompt_length_too_long_message + " truncated automatically because it was too long." 
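# truncate_prompt is defined elsewhere in the script. A sketch, under that
# assumption, of the behavior the handling above relies on: keep the tokens
# that fit within the model limit and return the unused remainder so it can
# be shown to the user.

number_of_reserved_tokens = 2  # per the comment above, 2 of the 77 tokens are reserved

def truncate_prompt_sketch(pipe, prompt_text):
    tokenizer = pipe.tokenizer
    maximum_tokens = tokenizer.model_max_length - number_of_reserved_tokens
    tokens = tokenizer.tokenize(prompt_text)
    text_used = tokenizer.convert_tokens_to_string(tokens[:maximum_tokens])
    text_not_used = tokenizer.convert_tokens_to_string(tokens[maximum_tokens:])
    return (text_used, text_not_used)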
+ + show_message(prompt_or_negative_prompt_length_too_long_message) @@ -3888,18 +4198,15 @@ def create_image_function ( current_base_model_generation_start_time = 0 -# global current_base_model_generation_start_time -# current_base_model_generation_start_time = time.time() global saved_final_base_model_pil_image_if_using_refiner saved_final_base_model_pil_image_if_using_refiner = None global upscaled_image_canceled - global upscaled_image_canceled_latents - upscaled_image_canceled = 0 - + global canceled_pil_image + canceled_pil_image = None def callback_function_for_base_model_progress( callback_pipe, @@ -3922,7 +4229,6 @@ def create_image_function ( seconds_per_step = 0 -# if callback_step_number >= 1: if callback_step_number >= 2: seconds_per_step = ((time.time() - current_base_model_generation_start_time) / callback_step_number) @@ -3942,12 +4248,23 @@ def create_image_function ( pipe._interrupt = True if ( - use_image_preview and + create_preview_images and ( - ((int(callback_step_index) % image_preview_step_interval) == 0) or - (seconds_per_step >= image_preview_seconds_interval) - ) and - (callback_step_number < current_estimated_total_base_model_steps) + ( + ( + ((int(callback_step_index) % image_preview_step_interval) == 0) or + (seconds_per_step >= image_preview_seconds_interval) + ) and + (callback_step_number < current_estimated_total_base_model_steps) + ) or + ( + ( + (use_refiner == 1) or + (use_upscaler == 1) + ) and + (callback_step_number == current_estimated_total_base_model_steps) + ) + ) ): latents = callback_kwargs["latents"] @@ -3958,14 +4275,6 @@ def create_image_function ( is_final_image = 0 - pil_image = create_image_from_latents( - model_to_use, - pipe, - latents, - generator, - is_final_image - ) - create_preview_image( model_to_use, user_id_state_value, @@ -3979,7 +4288,10 @@ def create_image_function ( cancel_process or ( (base_model_steps == callback_step_number) and - (use_refiner == 1) + ( + (use_refiner == 1) or + (use_upscaler == 1) + ) ) ): @@ -3991,7 +4303,7 @@ def create_image_function ( saved_final_base_model_pil_image_if_using_refiner = create_image_from_latents( base_model_name_value, - pipe, + callback_pipe, latents, generator, is_final_image @@ -4020,12 +4332,10 @@ def create_image_function ( if ( (show_image_creation_progress_log == 1) or enable_image_generation_cancellation or - use_image_preview + create_preview_images ): current_refiner_generation_start_time = 0 -# global current_refiner_generation_start_time -# current_refiner_generation_start_time = time.time() def callback_function_for_refiner_progress( callback_pipe, @@ -4048,7 +4358,6 @@ def create_image_function ( seconds_per_step = 0 -# if callback_step_number >= 1: if callback_step_number >= 2: seconds_per_step = ((time.time() - current_refiner_generation_start_time) / callback_step_number) @@ -4062,24 +4371,32 @@ def create_image_function ( refiner_progress_text = "Refiner processing started" if ( - use_image_preview and + (do_not_create_refining_preview_images == 0) and ( - ((int(callback_step_index) % image_preview_step_interval) == 0) or - (seconds_per_step >= image_preview_seconds_interval) - ) and - (callback_step_number < current_estimated_total_refiner_steps) + ( + ( + ((int(callback_step_index) % image_preview_step_interval) == 0) or + (seconds_per_step >= image_preview_seconds_interval) + ) and + (callback_step_number < current_estimated_total_refiner_steps) + ) or + ( + (use_upscaler == 1) and + (callback_step_number == current_estimated_total_refiner_steps) + ) + ) ): 
latents = callback_kwargs["latents"] temporary_extra = str(user_id_state_value) + "_refiner_" + str(callback_step_number) - model_to_use = base_model_name_value + model_to_use = "refiner" create_preview_image( model_to_use, user_id_state_value, - pipe, + callback_pipe, latents, generator, temporary_extra @@ -4110,8 +4427,6 @@ def create_image_function ( current_upscaler_generation_start_time = 0 -# global current_upscaler_generation_start_time -# current_upscaler_generation_start_time = time.time() def callback_function_for_upscaler_progress( callback_step_index, @@ -4133,7 +4448,6 @@ def create_image_function ( seconds_per_step = 0 -# if callback_step_number >= 1: if callback_step_number >= 2: seconds_per_step = ((time.time() - current_upscaler_generation_start_time) / callback_step_number) @@ -4147,19 +4461,21 @@ def create_image_function ( upscaler_progress_text = "Upscaler processing started" if ( - use_image_preview and + (do_not_create_upscaling_preview_images == 0) and ( - ((int(callback_step_index) % image_preview_step_interval) == 0) or - (seconds_per_step >= image_preview_seconds_interval) - ) and - (callback_step_number < current_estimated_total_upscaler_steps) + ( + ((int(callback_step_index) % image_preview_step_interval) == 0) or + (seconds_per_step >= image_preview_seconds_interval) + ) and + (callback_step_number < current_estimated_total_upscaler_steps) + ) ): latents = callback_latents temporary_extra = str(user_id_state_value) + "_upscale_" + str(callback_step_index) - model_to_use = "upscaler" + model_to_use = base_model_name_value create_preview_image( model_to_use, @@ -4192,13 +4508,23 @@ def create_image_function ( # upscaler._interrupt = True global upscaled_image_canceled - global upscaled_image_canceled_latents + global canceled_pil_image upscaled_image_canceled = 1 + model_to_use = base_model_name_value + latents = callback_latents - upscaled_image_canceled_latents = latents + is_final_image = 1 + + canceled_pil_image = create_image_from_latents( + model_to_use, + pipe, + latents, + generator, + is_final_image + ) raise Exception("end_at_this_step") @@ -4229,32 +4555,6 @@ def create_image_function ( output_text_field: output_text_field_update } -# output_image_field_update = gr.Image() -# output_image_gallery_field_update = gr.Gallery() -# output_text_field_update = gr.Textbox() -# prompt_truncated_field_group_update = gr.Group() -# prompt_truncated_field_update = gr.Textbox() -# negative_prompt_truncated_field_group_update = gr.Group() -# negative_prompt_truncated_field_update = gr.Textbox() -# error_text_field_update = gr.Textbox() - -# return ( -# output_image_field_update, -# output_image_gallery_field_update, -# output_text_field_update, -# prompt_truncated_field_group_update, -# prompt_truncated_field_update, -# negative_prompt_truncated_field_group_update, -# negative_prompt_truncated_field_update, -# last_model_configuration_name_selected_state_value, -# last_refiner_name_selected_state_value, -# last_upscaler_name_selected_state_value, -# pipe, -# stored_refiner_state, -# stored_upscaler_state, -# error_text_field_update -# ) - task_info_for_progress = "Initial image creation has begun" @@ -4281,11 +4581,11 @@ def create_image_function ( desc = task_info_for_progress ) - - base_image = pipe( - prompt = prompt_text, - negative_prompt = negative_prompt_text, + prompt = prompt_text_to_use_inside_pipeline, + negative_prompt = negative_prompt_text_to_use_inside_pipeline, + prompt_embeds = prompt_embeds, + negative_prompt_embeds = negative_prompt_embeds, width 
= image_width, height = image_height, num_inference_steps = base_model_steps, @@ -4297,28 +4597,75 @@ def create_image_function ( callback_on_step_end = callback_to_do_for_base_model_progress ) + if device == "cuda": + torch.cuda.empty_cache() + + have_upscaled_image = 0 have_refined_image = 0 base_image_for_next_step = base_image.images + refined_image_for_next_step = None upscaled_image_for_next_step = None - error_count = 0 - error_array = [] if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): use_refiner = 0 use_upscaler = 0 - refiner = {} - upscaler = {} + refiner_error = 0 - upscaler_error = 0 + + if ( + (use_refiner == 1) and + (last_refiner_name_selected_state_value == "") + ): + + if show_messages_in_command_prompt == 1: + + print ("Refiner is loading."); + + progress( + progress = 0, + desc = "Refiner is loading" + ) + + try: + + refiner = construct_refiner() + + if device == "cuda": + torch.cuda.empty_cache() + + last_refiner_name_selected_state_value = "refiner" + + except BaseException as error_message: + + use_refiner = 0 + + refiner_error = 1 + + error_count += 1 + + error_message_array.extend([ + "An error occurred while trying to load the refiner:\n" + str(error_message) + ]) + + if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): + + use_refiner = 0 + use_upscaler = 0 + + else: + + refiner = stored_refiner_state + + if ( (use_refiner == 1) or @@ -4327,220 +4674,213 @@ def create_image_function ( image_for_next_step = base_image_for_next_step - if use_refiner == 1: - try: - if (last_refiner_name_selected_state_value == ""): + if use_refiner == 1: - if show_messages_in_command_prompt == 1: + if show_messages_in_command_prompt == 1: - print ("Refiner is loading."); + print ("Refiner steps..."); - progress( - progress = 0, - desc = "Refiner is loading" - ) + if show_image_creation_progress_log == 1: - refiner = construct_refiner() + progress( + progress = 0, + desc = "Refining is beginning" + ) - last_refiner_name_selected_state_value = "refiner" + try: - else: + refined_image = refiner( + prompt = prompt_text_to_use_inside_pipeline, + negative_prompt = negative_prompt_text_to_use_inside_pipeline, + prompt_embeds = prompt_embeds, + negative_prompt_embeds = negative_prompt_embeds, + image = base_image_for_next_step, + num_inference_steps = num_inference_steps_in_refiner, + denoising_start = refining_denoise_start_field_value, + output_type = "pil", + generator = generator, + callback_on_step_end = callback_to_do_for_refiner_progress + ) - refiner = stored_refiner_state + if device == "cuda": + torch.cuda.empty_cache() - except BaseException as error_message: + except BaseException as error_message: - use_refiner = 0 + # User chose to refine image, but something went wrong. We won't + # use the refiner. Since we will be using the base image, where + # the output could be latents, we'll need to handle that later. 
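# construct_refiner is defined elsewhere in the script. A sketch, under
# that assumption, of the load the try block above performs, using the
# hugging_face_refiner_partial_path value defined earlier in this file:

import torch
from diffusers import DiffusionPipeline

def construct_refiner_sketch():
    refiner = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        torch_dtype = torch.float16,
        variant = "fp16",
        use_safetensors = True
    )
    return refiner.to("cuda")  # assumes device == "cuda"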
- refiner_error = 1 + use_refiner = 0 + use_upscaler = 0 - error_count += 1 + refiner_error = 1 - error_array.extend([ - "Error " + str(error_count) + ": An error occurred while trying to load the refiner:\n" + str(error_message) - ]) + error_count += 1 - if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): + error_message_array.extend([ + "An error occurred while refining:\n" + str(error_message) + ]) - use_refiner = 0 - use_upscaler = 0 - if use_refiner == 1: - if show_messages_in_command_prompt == 1: + if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): - print ("Refiner steps..."); + use_upscaler = 0 - if show_image_creation_progress_log == 1: + if use_refiner == 1: - progress( - progress = 0, - desc = "Refining is beginning" - ) + # User chose to refine image and it succeded. - try: - - refined_image = refiner( - prompt = prompt_text, - negative_prompt = negative_prompt_text, - image = base_image_for_next_step, - num_inference_steps = num_inference_steps_in_refiner, - denoising_start = refining_denoise_start_field_value, - output_type = "pil", - generator = generator, - callback_on_step_end = callback_to_do_for_refiner_progress - ) + have_refined_image = 1 - except BaseException as error_message: + refined_image_for_next_step = refined_image.images - # User chose to refine image, but something went wrong. - # We won't use the refiner. Since we will be using the - # base image, where the output could be latents, we'll - # need to handle that later. + image_for_next_step = refined_image_for_next_step - use_refiner = 0 - use_upscaler = 0 - refiner_error = 1 - error_count += 1 + upscaler_error = 0 - error_array.extend([ - "Error " + str(error_count) + ": An error occurred while refining:\n" + str(error_message) - ]) - if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): - use_upscaler = 0 + if ( + (use_upscaler == 1) and + (last_upscaler_name_selected_state_value == "") + ): - if use_refiner == 1: + if show_messages_in_command_prompt == 1: - # User chose to refine image and it succeded. 
+ print ("Upscaler is loading."); - have_refined_image = 1 + progress( + progress = 0, + desc = "Upscaler is loading" + ) - refined_image_for_next_step = refined_image.images[0] + try: - image_for_next_step = refined_image_for_next_step + upscaler = construct_upscaler() - if use_upscaler == 1: + if device == "cuda": + torch.cuda.empty_cache() - try: + last_upscaler_name_selected_state_value = "upscaler" - if (last_upscaler_name_selected_state_value == ""): + except BaseException as error_message: - if show_messages_in_command_prompt == 1: + use_upscaler = 0 - print ("Upscaler is loading."); + upscaler_error = 1 - progress( - progress = 0, - desc = "Upscaler is loading" - ) + error_count += 1 - upscaler = construct_upscaler() + error_message_array.extend([ + "An error occurred while trying to load the upscaler:\n" + str(error_message) + ]) - last_upscaler_name_selected_state_value = "upscaler" + if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): - else: + use_upscaler = 0 - upscaler = stored_upscaler_state + else: - except BaseException as error_message: + upscaler = stored_upscaler_state - use_upscaler = 0 - upscaler_error = 1 - error_count += 1 + if use_upscaler == 1: - error_array.extend([ - "Error " + str(error_count) + ": An error occurred while trying to load the upscaler:\n" + str(error_message) - ]) + if show_messages_in_command_prompt == 1: - if image_processing_is_canceled(user_id_state_value, image_generation_id_state_value): + print ("Upscaler steps..."); - use_upscaler = 0 + if show_image_creation_progress_log == 1: - if use_upscaler == 1: + progress( + progress = 0, + desc = "Upscaling is beginning" + ) - if show_messages_in_command_prompt == 1: - print ("Upscaler steps..."); - if show_image_creation_progress_log == 1: + if refining_base_model_output_to_refiner_is_in_latent_space_field_value == 1: - progress( - progress = 0, - desc = "Upscaling is beginning" - ) + if use_refiner == 1: - try: - - upscaled_image = upscaler( - prompt = prompt_text, - negative_prompt = negative_prompt_text, - image = image_for_next_step, - num_inference_steps = upscaling_steps, - guidance_scale = 0, - generator = generator, - callback = callback_to_do_for_upscaler_progress, - callback_steps = 1 - ) + image_for_next_step = refined_image_for_next_step[0] - except BaseException as error_message: + elif use_refiner == 0: - use_upscaler = 0 + image_for_next_step = base_image_for_next_step - if str(error_message) != "end_at_this_step": + else: - # User chose to upscale image, but something went - # wrong. We won't use the upscaler. - # - # If "error_message" is "end_at_this_step", then the - # user chose to cancel. We have to handle canceling - # when upscaling differently. We don't want to treat - # this as an actual error, so we don't do the below. + if use_refiner == 1: - upscaler_error = 1 + image_for_next_step = refined_image_for_next_step[0] - error_count += 1 + elif use_refiner == 0: - error_array.extend([ - "Error " + str(error_count) + ":\nAn error occurred while upscaling:\n" + str(error_message) - ]) + image_for_next_step = base_image_for_next_step[0] - if use_upscaler == 1: - # User chose to upscale image and it succeded. 
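# construct_upscaler is defined elsewhere in the script. A sketch, under
# that assumption, of the load performed above; the checkpoint name is the
# usual one for StableDiffusionLatentUpscalePipeline but is a guess here:

import torch
from diffusers import StableDiffusionLatentUpscalePipeline

def construct_upscaler_sketch():
    upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
        "stabilityai/sd-x2-latent-upscaler",
        torch_dtype = torch.float16
    )
    return upscaler.to("cuda")  # assumes device == "cuda"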
- have_upscaled_image = 1 + try: - upscaled_image_for_next_step = upscaled_image.images[0] + upscaled_image = upscaler( + prompt = prompt_text, + negative_prompt = negative_prompt_text, + image = image_for_next_step, + num_inference_steps = upscaling_steps, + guidance_scale = 0, + generator = generator, + callback = callback_to_do_for_upscaler_progress, + callback_steps = 1 + ) + if device == "cuda": + torch.cuda.empty_cache() + except BaseException as error_message: - if upscaled_image_canceled == 1: + use_upscaler = 0 - latents = upscaled_image_canceled_latents + if str(error_message) != "end_at_this_step": - temporary_extra = str(user_id_state_value) + "_upscale_canceled" + # User chose to upscale image, but something went wrong. We + # won't use the upscaler. + # + # If "error_message" is "end_at_this_step", then the user + # chose to cancel. We have to handle canceling when upscaling + # differently. We don't want to treat this as an actual + # error, so we don't do the below. - model_to_use = "upscaler" + upscaler_error = 1 - is_final_image = 1 + error_count += 1 - pil_image = create_image_from_latents( - model_to_use, - pipe, - latents, - generator, - is_final_image - ) + error_message_array.extend([ + "An error occurred while upscaling:\n" + str(error_message) + ]) - for key, value in enumerate(pil_image): + + + if use_upscaler == 1: + + # User chose to upscale image and it succeded. + + have_upscaled_image = 1 + + upscaled_image_for_next_step = upscaled_image.images[0] + + + + if upscaled_image_canceled == 1: + + for key, value in enumerate(canceled_pil_image): image_to_return = value elif have_upscaled_image == 1: @@ -4549,7 +4889,7 @@ def create_image_function ( elif have_refined_image == 1: - image_to_return = refined_image_for_next_step + image_to_return = refined_image_for_next_step[0] else: @@ -4592,6 +4932,7 @@ def create_image_function ( prompt_text_not_used_substring, negative_prompt_text, negative_prompt_text_not_used_substring, + allow_longer_prompts_for_sd_1_5_based_models_field_value, image_width, image_height, actual_seed, @@ -4625,14 +4966,6 @@ def create_image_function ( -# have_refined_image, -# have_upscaled_image, - -#Image before refining -#Image before upscaling - - - output_text_field_update = gr.Textbox( value = image_generation_information, lines = 12 @@ -4650,20 +4983,6 @@ def create_image_function ( - - - - -# save_base_image_when_using_refiner_field_value, -# save_refined_image_when_using_upscaler_field_value, - - - - - - - save_canceled_images = 0 - if ( (auto_save_imagery == 1) and ( @@ -4704,7 +5023,7 @@ def create_image_function ( final_image_file_path_and_file, image_to_return, add_generation_information_to_image, - info_to_save_in_image, + info_to_save_in_image ) final_image_text_file_path_and_file = saved_images_date_dir + file_name_without_extension + ".txt" @@ -4719,6 +5038,8 @@ def create_image_function ( value = image_to_return ) + + image_gallery_array_state_value.insert(0, image_to_return) prompt_information_array_state_value.insert(0, image_generation_information) @@ -4733,19 +5054,144 @@ def create_image_function ( - error_text_field_update = gr.Textbox( + output_base_model_image_field_accordion_update = gr.Accordion( visible = False ) - if (len(error_array) > 0): + output_base_model_image_field_update = gr.Image() - error_information = "\n\n".join(error_array) + if ( + ( + (have_refined_image == 1) or + (have_upscaled_image == 1) + ) and + (save_base_image_when_using_refiner_or_upscaler_field_value == 1) + ): - error_text_field_update = 
gr.Textbox( - value = error_information, + # Show base model image too + + for key, value in enumerate(saved_final_base_model_pil_image_if_using_refiner): + base_model_image_to_return = value + + if add_generation_information_to_image == 1: + + # Add generation info to base model image + + info_to_save_in_base_model_image = "\n-----------\nThis is the base model image that was saved during the image generation that matched the details below. Some of the details below do not apply to this image.\n\nImage generation information:\n" + image_generation_information + "\n-----------\n" + + base_model_image_to_return.info = {"parameters": info_to_save_in_base_model_image} + + if ( + (auto_save_imagery == 1) and + ( + (image_has_been_canceled == 0) or + (save_canceled_images == 1) + ) + ): + + base_model_image_file_path_and_file = saved_images_date_dir + file_name_without_extension + "_base_model.png" + + save_image_file( + base_model_image_file_path_and_file, + base_model_image_to_return, + add_generation_information_to_image, + info_to_save_in_base_model_image + ) + + output_base_model_image_field_accordion_update = gr.Accordion( + visible = True + ) + + output_base_model_image_field_update = gr.Image( + value = base_model_image_to_return + ) + + + + output_refiner_image_field_accordion_update = gr.Accordion( + visible = False + ) + + output_refiner_image_field_update = gr.Image() + + if ( + (use_refiner == 1) and + (have_upscaled_image == 1) and + (save_refined_image_when_using_upscaler_field_value == 1) + ): + + # Show refined image too + + refined_image_to_return = refined_image_for_next_step[0] + + if add_generation_information_to_image == 1: + + # Add generation info to refined image + + info_to_save_in_refined_image = "\n-----------\nThis is the refined image that was saved during the image generation that matched the details below. 
Some of the details below do not apply to this image.\n\nImage generation information:\n" + image_generation_information + "\n-----------\n" + + refined_image_to_return.info = {"parameters": info_to_save_in_refined_image} + + if ( + (auto_save_imagery == 1) and + ( + (image_has_been_canceled == 0) or + (save_canceled_images == 1) + ) + ): + + refined_image_file_path_and_file = saved_images_date_dir + file_name_without_extension + "_refined.png" + + save_image_file( + refined_image_file_path_and_file, + refined_image_to_return, + add_generation_information_to_image, + info_to_save_in_refined_image + ) + + output_refiner_image_field_accordion_update = gr.Accordion( visible = True ) + output_refiner_image_field_update = gr.Image( + value = refined_image_to_return + ) + + + + error_text_field_accordion_update = gr.Accordion( + visible = False + ) + + error_text_field_update = gr.Textbox( + value = "" + ) + + if (len(error_message_array) > 0): + + error_information = "" + + error_message_array_length = len(error_message_array) + + error_count = 1 + + for error_message in error_message_array: + + if (error_message_array_length > 1): + error_information += "Error " + str(error_count) + ":\n" + + error_information += error_message + "\n\n" + + error_count += 1 + + error_text_field_accordion_update = gr.Accordion( + visible = True + ) + + error_text_field_update = gr.Textbox( + value = error_information + ) + if show_messages_in_command_prompt == 1: @@ -4774,6 +5220,10 @@ def create_image_function ( return ( output_image_field_update, output_image_gallery_field_update, + output_base_model_image_field_accordion_update, + output_base_model_image_field_update, + output_refiner_image_field_accordion_update, + output_refiner_image_field_update, output_text_field_update, prompt_truncated_field_group_update, prompt_truncated_field_update, @@ -4785,6 +5235,7 @@ def create_image_function ( pipe, refiner, upscaler, + error_text_field_accordion_update, error_text_field_update ) @@ -4875,7 +5326,7 @@ def cancel_image_function( visible = True ) - output_image_preview_field_row_update = gr.Row( + output_image_preview_field_accordion_update = gr.Accordion( visible = False ) @@ -4886,7 +5337,7 @@ def cancel_image_function( cancel_image_object.update({ generate_image_button: generate_image_button_update, output_text_field: output_text_field_update, - output_image_preview_field_row: output_image_preview_field_row_update, + output_image_preview_field_accordion: output_image_preview_field_accordion_update, cancel_image_button_row: cancel_image_button_row_update, cancel_image_message_field_row: cancel_image_message_field_row_update }) @@ -4913,39 +5364,6 @@ def cancel_image_function( -##################### -# -# Cancel Image Processing -# -# When running on Windows, this is an attempt at closing the command -# prompt from the web display. It's really not worth having this. You can -# just close the prompt. I would like a nice way to cancel image -# creation, but couldn't figure that out. -# -##################### - -def close_command_prompt(): - - # I simply don't know how to stop the image generation without closing - # the command prompt. Doing that requires the code below twice for some - # reason. - # - # Method: - # https://stackoverflow.com/questions/67146623/how-to-close-the-command-prompt-from-python-script-directly - - gr.Warning("The command prompt window has been closed. Any image generation in progress has been stopped. 
To generate any other images, you will need to launch the command prompt again.") - - os.system('title kill_window') - - os.system(f'taskkill /f /fi "WINDOWTITLE eq kill_window"') - os.system(f'taskkill /f /fi "WINDOWTITLE eq kill_window"') - - - - - - - ##################### # # Download Data From HuggingFace @@ -5052,6 +5470,7 @@ def generate_link_function( base_model_field_index, prompt_text, negative_prompt_text, + allow_longer_prompts_for_sd_1_5_based_models_field_value, scheduler_index, image_width, image_height, @@ -5077,7 +5496,10 @@ def generate_link_function( stored_pipe_state, stored_refiner_state, stored_upscaler_state, - save_base_image_when_using_refiner_field_value, + create_preview_images, + do_not_create_refining_preview_images, + do_not_create_upscaling_preview_images, + save_base_image_when_using_refiner_or_upscaler_field_value, save_refined_image_when_using_upscaler_field_value, user_id_state, image_generation_id_state, @@ -5184,6 +5606,7 @@ def generate_link_function( if refiner_denoise_start_valid(refining_denoise_start_field_value): + refining_denoise_start_field_value = rounded_number(refining_denoise_start_field_value, 2) page_vars += refiner_denoise_start_key_in_url + "=" + str(refining_denoise_start_field_value) + "&" @@ -5225,13 +5648,61 @@ def generate_link_function( + nice_show_base_image_when_using_refiner_or_upscaler = "yes" + if save_base_image_when_using_refiner_or_upscaler_field_value == False: + nice_show_base_image_when_using_refiner_or_upscaler = "no" + + page_vars += show_base_image_when_using_refiner_or_upscaler_key_in_url + "=" + nice_show_base_image_when_using_refiner_or_upscaler + "&" + + + + nice_show_refined_image_when_using_upscaler = "yes" + if save_refined_image_when_using_upscaler_field_value == False: + nice_show_refined_image_when_using_upscaler = "no" + + page_vars += show_refined_image_when_using_upscaler_key_in_url + "=" + nice_show_refined_image_when_using_upscaler + "&" + + + + nice_create_preview_images = "yes" + if create_preview_images == False: + nice_create_preview_images = "no" + + page_vars += create_preview_images_key_in_url + "=" + nice_create_preview_images + "&" + + + + nice_do_not_create_refining_preview_images = "yes" + if do_not_create_refining_preview_images == False: + nice_do_not_create_refining_preview_images = "no" + + page_vars += do_not_create_refining_preview_images_key_in_url + "=" + nice_do_not_create_refining_preview_images + "&" + + + + nice_do_not_create_upscaling_preview_images = "yes" + if do_not_create_upscaling_preview_images == False: + nice_do_not_create_upscaling_preview_images = "no" + + page_vars += do_not_create_upscaling_preview_images_key_in_url + "=" + nice_do_not_create_upscaling_preview_images + "&" + + + + nice_allow_longer_prompts_for_sd_1_5_based_models = "yes" + if allow_longer_prompts_for_sd_1_5_based_models_field_value == False: + nice_allow_longer_prompts_for_sd_1_5_based_models = "no" + + page_vars += allow_longer_prompts_for_sd_1_5_based_models_key_in_url + "=" + nice_allow_longer_prompts_for_sd_1_5_based_models + "&" + + + prompt_text = prompt_text[:maximum_prompt_characer_count] if prompt_valid(prompt_text): page_vars += prompt_field_key_in_url + "=" + urllib.parse.quote(prompt_text) + "&" - negative_prompt_text = negative_prompt_text[:maximum_prompt_characer_count] + negative_prompt_text = negative_prompt_text[:maximum_neg_prompt_characer_count] if prompt_valid(negative_prompt_text): page_vars += negative_prompt_field_key_in_url + "=" + urllib.parse.quote(negative_prompt_text) + 
"&" @@ -5262,6 +5733,7 @@ def generate_link_textbox_function( base_model_field_index, prompt_text, negative_prompt_text, + allow_longer_prompts_for_sd_1_5_based_models_field_value, scheduler_index, image_width, image_height, @@ -5287,7 +5759,10 @@ def generate_link_textbox_function( stored_pipe_state, stored_refiner_state, stored_upscaler_state, - save_base_image_when_using_refiner_field_value, + create_preview_images, + do_not_create_refining_preview_images, + do_not_create_upscaling_preview_images, + save_base_image_when_using_refiner_or_upscaler_field_value, save_refined_image_when_using_upscaler_field_value, user_id_state, image_generation_id_state, @@ -5301,6 +5776,7 @@ def generate_link_textbox_function( base_model_field_index, prompt_text, negative_prompt_text, + allow_longer_prompts_for_sd_1_5_based_models_field_value, scheduler_index, image_width, image_height, @@ -5326,7 +5802,10 @@ def generate_link_textbox_function( stored_pipe_state, stored_refiner_state, stored_upscaler_state, - save_base_image_when_using_refiner_field_value, + create_preview_images, + do_not_create_refining_preview_images, + do_not_create_upscaling_preview_images, + save_base_image_when_using_refiner_or_upscaler_field_value, save_refined_image_when_using_upscaler_field_value, user_id_state, image_generation_id_state, @@ -5476,6 +5955,25 @@ def get_query_params( + if allow_longer_prompts_for_sd_1_5_based_models_key_in_url in url_object: + + allow_longer_prompts_for_sd_1_5_based_models_in_url = url_object[allow_longer_prompts_for_sd_1_5_based_models_key_in_url].lower() + + allow_longer_prompts_for_sd_1_5_based_models_in_url_formatted = True + + if ( + (allow_longer_prompts_for_sd_1_5_based_models_in_url == "0") or + (allow_longer_prompts_for_sd_1_5_based_models_in_url == "n") or + (allow_longer_prompts_for_sd_1_5_based_models_in_url == "no") or + (allow_longer_prompts_for_sd_1_5_based_models_in_url == "false") + ): + + allow_longer_prompts_for_sd_1_5_based_models_in_url_formatted = False + + field_object.update({allow_longer_prompts_for_sd_1_5_based_models_field: allow_longer_prompts_for_sd_1_5_based_models_in_url_formatted}) + + + if scheduler_field_key_in_url in url_object: scheduler_field_in_url = url_object[scheduler_field_key_in_url].lower() @@ -5618,6 +6116,8 @@ def get_query_params( if refiner_denoise_start_valid(refiner_denoise_start_in_url): + refiner_denoise_start_in_url = rounded_number(refiner_denoise_start_in_url, 2) + field_object.update({refining_denoise_start_field: refiner_denoise_start_in_url}) @@ -5718,6 +6218,101 @@ def get_query_params( + if show_base_image_when_using_refiner_or_upscaler_key_in_url in url_object: + + show_base_image_when_using_refiner_or_upscaler_in_url = str(url_object[show_base_image_when_using_refiner_or_upscaler_key_in_url]) + + show_base_image_when_using_refiner_or_upscaler_in_url_formatted = True + + if ( + (show_base_image_when_using_refiner_or_upscaler_in_url == "0") or + (show_base_image_when_using_refiner_or_upscaler_in_url == "n") or + (show_base_image_when_using_refiner_or_upscaler_in_url == "no") or + (show_base_image_when_using_refiner_or_upscaler_in_url == "false") + ): + + show_base_image_when_using_refiner_or_upscaler_in_url_formatted = False + + field_object.update({save_base_image_when_using_refiner_or_upscaler_field: show_base_image_when_using_refiner_or_upscaler_in_url_formatted}) + + + + if show_refined_image_when_using_upscaler_key_in_url in url_object: + + show_refined_image_when_using_upscaler_in_url = 
str(url_object[show_refined_image_when_using_upscaler_key_in_url])
+
+        show_refined_image_when_using_upscaler_in_url_formatted = True
+
+        if (
+            (show_refined_image_when_using_upscaler_in_url == "0") or
+            (show_refined_image_when_using_upscaler_in_url == "n") or
+            (show_refined_image_when_using_upscaler_in_url == "no") or
+            (show_refined_image_when_using_upscaler_in_url == "false")
+        ):
+
+            show_refined_image_when_using_upscaler_in_url_formatted = False
+
+        field_object.update({save_refined_image_when_using_upscaler_field: show_refined_image_when_using_upscaler_in_url_formatted})
+
+
+
+    if create_preview_images_key_in_url in url_object:
+
+        create_preview_images_in_url = str(url_object[create_preview_images_key_in_url])
+
+        create_preview_images_in_url_formatted = True
+
+        if (
+            (create_preview_images_in_url == "0") or
+            (create_preview_images_in_url == "n") or
+            (create_preview_images_in_url == "no") or
+            (create_preview_images_in_url == "false")
+        ):
+
+            create_preview_images_in_url_formatted = False
+
+        field_object.update({create_preview_images_field: create_preview_images_in_url_formatted})
+
+
+
+    if do_not_create_refining_preview_images_key_in_url in url_object:
+
+        do_not_create_refining_preview_images_in_url = str(url_object[do_not_create_refining_preview_images_key_in_url])
+
+        do_not_create_refining_preview_images_in_url_formatted = True
+
+        if (
+            (do_not_create_refining_preview_images_in_url == "0") or
+            (do_not_create_refining_preview_images_in_url == "n") or
+            (do_not_create_refining_preview_images_in_url == "no") or
+            (do_not_create_refining_preview_images_in_url == "false")
+        ):
+
+            do_not_create_refining_preview_images_in_url_formatted = False
+
+        field_object.update({do_not_create_refining_preview_images_field: do_not_create_refining_preview_images_in_url_formatted})
+
+
+
+    if do_not_create_upscaling_preview_images_key_in_url in url_object:
+
+        do_not_create_upscaling_preview_images_in_url = str(url_object[do_not_create_upscaling_preview_images_key_in_url])
+
+        do_not_create_upscaling_preview_images_in_url_formatted = True
+
+        if (
+            (do_not_create_upscaling_preview_images_in_url == "0") or
+            (do_not_create_upscaling_preview_images_in_url == "n") or
+            (do_not_create_upscaling_preview_images_in_url == "no") or
+            (do_not_create_upscaling_preview_images_in_url == "false")
+        ):
+
+            do_not_create_upscaling_preview_images_in_url_formatted = False
+
+        field_object.update({do_not_create_upscaling_preview_images_field: do_not_create_upscaling_preview_images_in_url_formatted})
+
+
    if special_theme_key_in_url in url_object:

        special_theme_in_url = str(url_object[special_theme_key_in_url])

@@ -5819,6 +6414,91 @@ def set_base_model_and_model_configuration_from_query_params(
+
+
+#####################
+#
+# Delete Preview Imagery
+#
+# Delete the previous preview image, and its folder, once the next
+# preview image has been loaded into the preview field.
+#
+#####################
+
+def delete_preview_imagery(
+    output_image_preview_field_value
+):
+
+    global previous_preview_image
+
+    locally_saved_image = output_image_preview_field_value
+
+    try:
+
+        if previous_preview_image is not None:
+
+            if os.path.exists(previous_preview_image):
+
+                os.remove(previous_preview_image)
+
+                folder_to_delete = os.path.dirname(previous_preview_image)
+
+                if os.path.exists(folder_to_delete):
+
+                    os.rmdir(folder_to_delete)
+
+    except BaseException as error_message:
+
+        print ("The preview image, or its directory, could not be automatically deleted.")
+
+    previous_preview_image = locally_saved_image
+
+    return {}
+
+
+
+
+
+
+
+#####################
+#
+# Allow Longer Prompts for Stable Diffusion 1.5 Based Models
+#
+# This only applies to those
models. +# +##################### + +def allow_longer_prompts_for_sd_1_5_based_models_function( + allow_longer_prompts_for_sd_1_5_based_models_field_value +): + + allow_longer_prompts_for_sd_1_5_based_models_field_value = numerical_bool(allow_longer_prompts_for_sd_1_5_based_models_field_value) + + prompt_textbox_label = prompt_textbox_label_with_length_limit + negative_prompt_textbox_label = negative_prompt_textbox_label_with_length_limit + + if allow_longer_prompts_for_sd_1_5_based_models_field_value == 1: + + prompt_textbox_label = prompt_textbox_label_with_no_length_limit + negative_prompt_textbox_label = negative_prompt_textbox_label_with_no_length_limit + + prompt_field_update = gr.Textbox( + label = prompt_textbox_label + ) + + negative_prompt_field_update = gr.Textbox( + label = negative_prompt_textbox_label + ) + + return { + prompt_field: prompt_field_update, + negative_prompt_field: negative_prompt_field_update + } + + + + + + + ############################################################################### ############################################################################### # @@ -5915,6 +6595,16 @@ footer object-fit: scale-down; } +.extra_image_class div.image-container +{ + height: 200px; +} + +.extra_image_class button.image-button img +{ + object-fit: scale-down; +} + body.dark .textbox_vertical_scroll textarea::-webkit-scrollbar-track { background-color: rgb(66, 66, 66) !important; @@ -6016,7 +6706,7 @@ with gr.Blocks( with gr.Row(): with gr.Column( - scale = 3, + scale = 4, min_width = 200 ): @@ -6053,9 +6743,7 @@ with gr.Blocks( with gr.Row(): prompt_field = gr.Textbox( - label = "Prompt (77 token limit):", -# label = "Prompt:", -# info = "77 token limit", + label = prompt_textbox_label_to_use, value = default_prompt ) @@ -6065,9 +6753,7 @@ with gr.Blocks( ): negative_prompt_field = gr.Textbox( - label = "Negative Prompt (77 token limit):", -# label = "Negative Prompt:", -# info = "77 token limit", + label = negative_prompt_textbox_label_to_use, value = default_negative_prompt ) @@ -6081,6 +6767,21 @@ with gr.Blocks( elem_classes = "html_field_style_class" ) + with gr.Row( + elem_id = "allow_longer_prompts_row_id", + visible = default_allow_longer_prompts_row_visibility + ): + + allow_longer_prompts_for_sd_1_5_based_models_field = gr.Checkbox( + label = "Allow longer prompts for Stable Diffusion 1.5 and PhotoReal models when not using the refiner or upscaler", + value = default_allow_longer_prompts_for_sd_1_5_based_models_is_selected, + interactive = True, + container = True, + elem_classes = "sp_checkbox" + ) + + + with gr.Group( visible = refiner_group_visible ): @@ -6135,7 +6836,15 @@ with gr.Blocks( elem_classes = "sp_checkbox" ) + with gr.Row( + elem_id = "base_model_output_in_latent_space_note_field_row_id", + visible = default_base_model_output_in_latent_space_note_field_row_visibility + ): + base_model_output_in_latent_space_note_field = gr.HTML( + value = "
With the refiner selected for this base model, the base model output that is passed to the refiner will always be a PIL image, regardless of the choice above. If it stayed in latent space, the image would be very distorted.
", + elem_classes = "html_field_style_class" + ) with gr.Group( visible = upscaler_group_visible @@ -6184,7 +6893,7 @@ with gr.Blocks( ) with gr.Column( - scale = 2, + scale = 3, min_width = 200 ): @@ -6376,35 +7085,34 @@ with gr.Blocks( interactive = True ) - - with gr.Column( - scale = 3, + scale = 4, min_width = 200 ): - output_image_field_visibility = True - output_image_gallery_field_visibility = False + output_image_field_row_visibility = True + output_image_gallery_field_row_visibility = False if use_image_gallery == 1: - output_image_field_visibility = False - output_image_gallery_field_visibility = True + output_image_field_row_visibility = False + output_image_gallery_field_row_visibility = True with gr.Row( - visible = output_image_field_visibility - ): + visible = output_image_field_row_visibility + ) as output_image_field_accordion: output_image_field = gr.Image( label = "Generated Image", type = "pil", height = gradio_image_component_height, + show_download_button = True, elem_classes = "image_scaling" ) with gr.Row( - visible = output_image_gallery_field_visibility - ): + visible = output_image_gallery_field_row_visibility + ) as output_image_gallery_field_accordion: output_image_gallery_field = gr.Gallery( label = "Generated Images", @@ -6412,51 +7120,69 @@ with gr.Blocks( selected_index = 0, allow_preview = True, preview = True, -# columns = "2", -# rows = None, -# columns = "2", -# rows = None, + columns = "2", + rows = None, height = gradio_image_gallery_component_height, object_fit = "scale-down", show_download_button = True, elem_classes = "image_scaling" ) - with gr.Row( + with gr.Accordion( + label = "Initial Image", visible = False - ) as output_image_preview_field_row: + ) as output_base_model_image_field_accordion: + + output_base_model_image_field = gr.Image( + type = "pil", + height = gradio_extra_image_component_height, + show_download_button = True, + container = False, + elem_classes = "extra_image_class" + ) - output_image_preview_field_every_value = None + with gr.Accordion( + label = "Refined Image (before upscaling)", + visible = False + ) as output_refiner_image_field_accordion: - if use_image_preview == 1: + output_refiner_image_field = gr.Image( + type = "pil", + height = gradio_extra_image_component_height, + show_download_button = True, + container = False, + elem_classes = "extra_image_class" + ) - output_image_preview_field_every_value = load_image_preview_frequency_in_seconds + with gr.Accordion( + label = "Preview", + visible = False + ) as output_image_preview_field_accordion: output_image_preview_field = gr.Image( elem_id = "image_preview_id", -# value = load_image_preview(user_id_state), -# every = output_image_preview_field_every_value, - label = "Preview", - type = "pil", + type = "filepath", interactive = False, show_download_button = True, height = gradio_image_gallery_component_height, + container = False, elem_classes = "image_scaling" ) - with gr.Row(): + with gr.Accordion( + label = "Error Information:", + visible = False + ) as error_text_field_accordion: error_text_field = gr.Textbox( - label = "Error Information:", value = "", show_copy_button = True, lines = 5, max_lines = 8, autoscroll = False, interactive = False, - container = True, - elem_classes = "textbox_vertical_scroll", - visible = False + container = False, + elem_classes = "textbox_vertical_scroll" ) with gr.Row(): @@ -6510,19 +7236,6 @@ with gr.Blocks( elem_classes = "textbox_vertical_scroll" ) - if enable_close_command_prompt_button == 1: - - with gr.Row(): - - 
close_command_prompt_button = gr.Button( - value = "Close Command Prompt", - variant = "stop" - ) - - gr.Markdown("Closing the command prompt will cancel any images in the process of being created. You will need to launch it again, and then likely refresh the page, to create more images.") - - - with gr.Accordion( label = "Other Settings", open = True @@ -6564,9 +7277,9 @@ with gr.Blocks( if auto_save_imagery == 1: save_or_display_word_text_for_save_base_image = "Save" - save_base_image_when_using_refiner_field = gr.Checkbox( - label = save_or_display_word_text_for_save_base_image + " base image as well when using refiner (doesn't work yet)", - value = default_save_base_image_when_using_refiner_is_selected, + save_base_image_when_using_refiner_or_upscaler_field = gr.Checkbox( + label = save_or_display_word_text_for_save_base_image + " base image as well when using refiner or upscaler", + value = default_save_base_image_when_using_refiner_or_upscaler_is_selected, interactive = True, container = False, elem_classes = "sp_checkbox" @@ -6579,13 +7292,43 @@ with gr.Blocks( save_or_display_word_text_for_save_refined_image = "save" save_refined_image_when_using_upscaler_field = gr.Checkbox( - label = "If applicable, " + save_or_display_word_text_for_save_refined_image + " refined image as well when using upscaler (doesn't work yet)", + label = "If applicable, " + save_or_display_word_text_for_save_refined_image + " refined image as well when using upscaler", value = default_save_refined_image_when_using_upscaler_is_selected, interactive = True, container = False, elem_classes = "sp_checkbox" ) + with gr.Row(): + + create_preview_images_field = gr.Checkbox( + label = "Create preview images during image generation", + value = default_create_preview_images_is_selected, + interactive = True, + container = False, + elem_classes = "sp_checkbox" + ) + + with gr.Row(): + + do_not_create_refining_preview_images_field = gr.Checkbox( + label = "If applicable, do not create preview images during refining", + value = default_do_not_create_refining_preview_images_is_selected, + interactive = True, + container = False, + elem_classes = "sp_checkbox" + ) + + with gr.Row(): + + do_not_create_upscaling_preview_images_field = gr.Checkbox( + label = "If applicable, do not create preview images during upscaling", + value = default_do_not_create_upscaling_preview_images_is_selected, + interactive = True, + container = False, + elem_classes = "sp_checkbox" + ) + with gr.Row(): refining_steps_option_for_older_configuration_field = gr.Checkbox( @@ -6874,29 +7617,29 @@ async ( (enable_upscaler == 1) ): - triggers_array = [] + update_refiner_and_upscaler_status_triggers_array = [] if enable_refiner == 1: - triggers_array.extend([ + update_refiner_and_upscaler_status_triggers_array.extend([ base_model_field.change, refining_selection_field.change ]) for this_model_configuration_dropdown_field in model_configuration_dropdown_fields_array: - triggers_array.extend([ + update_refiner_and_upscaler_status_triggers_array.extend([ this_model_configuration_dropdown_field.change ]) if enable_upscaler == 1: - triggers_array.extend([ + update_refiner_and_upscaler_status_triggers_array.extend([ upscaling_selection_field.change ]) gr.on( - triggers = triggers_array, + triggers = update_refiner_and_upscaler_status_triggers_array, fn = None, inputs = [ refining_selection_field, @@ -6914,6 +7657,7 @@ async ( base_model_field, prompt_field, negative_prompt_field, + allow_longer_prompts_for_sd_1_5_based_models_field, scheduler_field, 
image_width_field, image_height_field, @@ -6939,7 +7683,10 @@ async ( stored_pipe_state, stored_refiner_state, stored_upscaler_state, - save_base_image_when_using_refiner_field, + create_preview_images_field, + do_not_create_refining_preview_images_field, + do_not_create_upscaling_preview_images_field, + save_base_image_when_using_refiner_or_upscaler_field, save_refined_image_when_using_upscaler_field, user_id_state, image_generation_id_state @@ -6955,13 +7702,21 @@ async ( generate_image_button_click_event = generate_image_button.click( fn = before_create_image_function, - inputs = None, +# inputs = None, + inputs = [ + create_preview_images_field + ], outputs = [ generate_image_button, output_image_field, output_image_gallery_field, + output_base_model_image_field_accordion, + output_base_model_image_field, + output_refiner_image_field_accordion, + output_refiner_image_field, output_text_field, - output_image_preview_field_row, + output_image_preview_field_accordion, + output_image_preview_field, prompt_truncated_field_group, prompt_truncated_field, negative_prompt_truncated_field_group, @@ -6970,6 +7725,7 @@ async ( cancel_image_button, cancel_image_message_field_row, cancel_image_message_field, + error_text_field_accordion, error_text_field, image_generation_id_state ], @@ -6981,6 +7737,10 @@ async ( outputs = [ output_image_field, output_image_gallery_field, + output_base_model_image_field_accordion, + output_base_model_image_field, + output_refiner_image_field_accordion, + output_refiner_image_field, output_text_field, prompt_truncated_field_group, prompt_truncated_field, @@ -6992,6 +7752,7 @@ async ( stored_pipe_state, stored_refiner_state, stored_upscaler_state, + error_text_field_accordion, error_text_field ], show_progress = "full", @@ -7004,7 +7765,7 @@ async ( output_image_field, output_image_gallery_field, output_text_field, - output_image_preview_field_row, + output_image_preview_field_accordion, generate_image_button_row, cancel_image_button_row, cancel_image_button, @@ -7082,7 +7843,7 @@ async ( generate_image_button_row, generate_image_button, output_text_field, - output_image_preview_field_row, + output_image_preview_field_accordion, cancel_image_button_row, cancel_image_button, cancel_image_message_field_row, @@ -7095,20 +7856,6 @@ async ( - if enable_close_command_prompt_button == 1: - - # https://github.com/gradio-app/gradio/pull/2433/files - - close_command_prompt_button_click_event = close_command_prompt_button.click( - fn = close_command_prompt, - inputs = None, - outputs = None, - cancels = [generate_image_button_click_event], - queue = True - ) - - - # Remove last comma model_configuration_dropdown_field_values_for_js = model_configuration_dropdown_field_values_for_js[:-1] @@ -7195,7 +7942,7 @@ async () => {{ every_value_in_seconds_for_image_preview = None - if use_image_preview == 1: + if enable_image_preview == 1: every_value_in_seconds_for_image_preview = load_image_preview_frequency_in_seconds @@ -7333,6 +8080,104 @@ async ( + if ( + (enable_image_preview == 1) and + (delete_preview_images_immediately == 1) + ): + + output_image_preview_field_change_event = output_image_preview_field.change( + fn = delete_preview_imagery, + inputs = [ + output_image_preview_field + ], + outputs = None, + show_progress = "hidden", + queue = False + ) + + + + if enable_refiner == 1: + + base_model_output_in_latent_space_note_function_js = """ + +async ( + refiningSelectionFieldValue, + baseModelFieldFullNameValue, + refiningBaseModelOutputToRefinerIsInLatentSpaceValue +) => {{ + 
"use strict"; + + var baseModelNamesObject = {0}; + var baseModelsNotSupportingBaseModelOutputInLatentSpaceToRefinerObject = {1}; + + var baseModelFullNamesToBaseModelIdConversion = {{}}; + Object.keys(baseModelNamesObject).forEach(key => {{ + baseModelFullNamesToBaseModelIdConversion[baseModelNamesObject[key]] = key; + }}); + var baseModelFieldValue = ""; + if (baseModelFullNamesToBaseModelIdConversion.hasOwnProperty(baseModelFieldFullNameValue)) {{ + baseModelFieldValue = baseModelFullNamesToBaseModelIdConversion[baseModelFieldFullNameValue]; + }} + + var baseModelOutputInLatentSpaceNoteFieldDisplay = "none"; + if ( + (refiningSelectionFieldValue === "Yes") && + baseModelsNotSupportingBaseModelOutputInLatentSpaceToRefinerObject.hasOwnProperty(baseModelFieldValue) && + (refiningBaseModelOutputToRefinerIsInLatentSpaceValue === true) + ) {{ + baseModelOutputInLatentSpaceNoteFieldDisplay = "block"; + }} + document.getElementById("base_model_output_in_latent_space_note_field_row_id").style.display = baseModelOutputInLatentSpaceNoteFieldDisplay; + +}} + +""".format( + base_model_names_object, + base_models_not_supporting_base_model_output_in_latent_space_to_refiner_object +) + + + + base_model_output_in_latent_space_note_triggers_array = [ + refining_selection_field.change, + base_model_field.change, + refining_base_model_output_to_refiner_is_in_latent_space_field.change + ] + + gr.on( + triggers = base_model_output_in_latent_space_note_triggers_array, + fn = None, + inputs = [ + refining_selection_field, + base_model_field, + refining_base_model_output_to_refiner_is_in_latent_space_field + ], + outputs = None, + show_progress = "hidden", + queue = False, + js = base_model_output_in_latent_space_note_function_js + ) + + + + if enable_longer_prompts == 1: + + allow_longer_prompts_for_sd_1_5_based_models_field_change_event = allow_longer_prompts_for_sd_1_5_based_models_field.change( + fn = allow_longer_prompts_for_sd_1_5_based_models_function, + inputs = [ + allow_longer_prompts_for_sd_1_5_based_models_field + ], + outputs = [ + prompt_field, + negative_prompt_field + ], + show_progress = "hidden", + queue = False + ) + + + sd_interface.queue( max_size = max_queue_size )