From 9588721197bc3c61354811eca5aff6f470b0b2f8 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:49:17 -0800 Subject: [PATCH 01/69] feat: support LyCORIS BOFT --- extensions-builtin/Lora/network_oft.py | 44 ++++++++++++++++++++------ 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d1c46a4b2..8a37828cc 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,6 @@ import torch import network -from lyco_helpers import factorization +from lyco_helpers import factorization, butterfly_factor from einops import rearrange @@ -36,6 +36,12 @@ class NetworkModuleOFT(network.NetworkModule): # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) + self.is_boft = False + if "boft" in weights.w.keys(): + self.is_boft = True + self.boft_b = weights.w["boft_b"] + self.boft_m = weights.w["boft_m"] + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported @@ -68,14 +74,34 @@ class NetworkModuleOFT(network.NetworkModule): R = oft_blocks.to(orig_weight.device) - # This errors out for MultiheadAttention, might need to be handled up-stream - merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) - merged_weight = torch.einsum( - 'k n m, k n ... -> k m ...', - R, - merged_weight - ) - merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + if not self.is_boft: + # This errors out for MultiheadAttention, might need to be handled up-stream + merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + merged_weight = torch.einsum( + 'k n m, k n ... -> k m ...', + R, + merged_weight + ) + merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + else: + scale = 1.0 + m = self.boft_m.to(device=oft_blocks.device, dtype=oft_blocks.dtype) + b = self.boft_b.to(device=oft_blocks.device, dtype=oft_blocks.dtype) + r_b = b // 2 + inp = orig_weight + for i in range(m): + bi = R[i] # b_num, b_size, b_size + if i == 0: + # Apply multiplier/scale and rescale into first weight + bi = bi * scale + (1 - scale) * eye + #if self.rescaled: + # bi = bi * self.rescale + inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b) + inp = rearrange(inp, "(d b) ... -> d b ...", b=b) + inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp) + inp = rearrange(inp, "d b ... -> (d b) ...") + inp = rearrange(inp, "(c k g) ... 
-> (c g k) ...", g=2, k=2**i * r_b) + merged_weight = inp updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype) output_shape = orig_weight.shape From a4668a16b6f8e98bc6e1553aa754735f9148770f Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:51:22 -0800 Subject: [PATCH 02/69] fix: calculate butterfly factor --- extensions-builtin/Lora/network_oft.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 8a37828cc..0f20d701b 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -57,6 +57,9 @@ class NetworkModuleOFT(network.NetworkModule): self.constraint = self.alpha * self.out_dim self.num_blocks = self.dim self.block_size = self.out_dim // self.dim + elif self.is_boft: + self.constraint = None + self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) From 81c16c965e532c6d86a969284c320ff8fcb0451d Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:54:14 -0800 Subject: [PATCH 03/69] fix: add butterfly_factor fn --- extensions-builtin/Lora/lyco_helpers.py | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 1679a0ce6..3c4f5bad2 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -66,3 +66,29 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: n, m = m, n return m, n +# from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/boft.py +def butterfly_factor(dimension: int, factor: int = -1) -> tuple[int, int]: + """ + m = 2k + n = 2**p + m*n = dim + """ + + # Find the first solution and check if it is even doable + m = n = 0 + while m <= factor: + m += 2 + while dimension % m != 0 and m < dimension: + m += 2 + if m > factor: + break + if sum(int(i) for i in f"{dimension//m:b}") == 1: + n = dimension // m + + if n == 0: + raise ValueError( + f"It is impossible to decompose {dimension} with factor {factor} under BOFT constrains." + ) + + #log_butterfly_factorize(dimension, factor, (dimension // n, n)) + return dimension // n, n From 2f1073dc6edf2d1388f6aee4af91cb354099a463 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:55:11 -0800 Subject: [PATCH 04/69] style: fix lint --- extensions-builtin/Lora/network_oft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 0f20d701b..dc6db56f1 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -96,7 +96,7 @@ class NetworkModuleOFT(network.NetworkModule): bi = R[i] # b_num, b_size, b_size if i == 0: # Apply multiplier/scale and rescale into first weight - bi = bi * scale + (1 - scale) * eye + bi = bi * scale + (1 - scale) * eye #if self.rescaled: # bi = bi * self.rescale inp = rearrange(inp, "(c g k) ... 
-> (c k g) ...", g=2, k=2**i * r_b) From 325eaeb584f8565d49ce73553165088f794d3d12 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:55:05 -0800 Subject: [PATCH 05/69] fix: get boft params from weight shape --- extensions-builtin/Lora/network_oft.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index dc6db56f1..fc7132651 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,6 @@ import torch import network -from lyco_helpers import factorization, butterfly_factor +from lyco_helpers import factorization from einops import rearrange @@ -37,10 +37,8 @@ class NetworkModuleOFT(network.NetworkModule): self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) self.is_boft = False - if "boft" in weights.w.keys(): + if weights.w["oft_diag"].dim() == 4: self.is_boft = True - self.boft_b = weights.w["boft_b"] - self.boft_m = weights.w["boft_m"] is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] @@ -59,7 +57,11 @@ class NetworkModuleOFT(network.NetworkModule): self.block_size = self.out_dim // self.dim elif self.is_boft: self.constraint = None - self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) + self.boft_m = weights.w["oft_diag"].shape[0] + self.block_num = weights.w["oft_diag"].shape[1] + self.block_size = weights.w["oft_diag"].shape[2] + self.boft_b = self.block_size + #self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) @@ -88,8 +90,8 @@ class NetworkModuleOFT(network.NetworkModule): merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') else: scale = 1.0 - m = self.boft_m.to(device=oft_blocks.device, dtype=oft_blocks.dtype) - b = self.boft_b.to(device=oft_blocks.device, dtype=oft_blocks.dtype) + m = self.boft_m + b = self.boft_b r_b = b // 2 inp = orig_weight for i in range(m): From 613b0d9548a859408433bff7a6dca7fd0f2eae7e Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 8 Feb 2024 21:58:59 -0800 Subject: [PATCH 06/69] doc: add boft comment --- extensions-builtin/Lora/network_oft.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index fc7132651..d7b317029 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -29,13 +29,14 @@ class NetworkModuleOFT(network.NetworkModule): self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) self.alpha = weights.w["alpha"] # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - # LyCORIS + # LyCORIS OFT elif "oft_diag" in weights.w.keys(): self.is_kohya = False self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) + # LyCORIS BOFT self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True @@ -89,6 +90,7 @@ class NetworkModuleOFT(network.NetworkModule): ) merged_weight = rearrange(merged_weight, 'k m ... 
-> (k m) ...') else: + # TODO: determine correct value for scale scale = 1.0 m = self.boft_m b = self.boft_b @@ -99,8 +101,6 @@ class NetworkModuleOFT(network.NetworkModule): if i == 0: # Apply multiplier/scale and rescale into first weight bi = bi * scale + (1 - scale) * eye - #if self.rescaled: - # bi = bi * self.rescale inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b) inp = rearrange(inp, "(d b) ... -> d b ...", b=b) inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp) From eb6f2df826087fdc62f6680364a0e16f666eef64 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 8 Feb 2024 22:00:15 -0800 Subject: [PATCH 07/69] Revert "fix: add butterfly_factor fn" This reverts commit 81c16c965e532c6d86a969284c320ff8fcb0451d. --- extensions-builtin/Lora/lyco_helpers.py | 26 ------------------------- 1 file changed, 26 deletions(-) diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 3c4f5bad2..1679a0ce6 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -66,29 +66,3 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: n, m = m, n return m, n -# from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/boft.py -def butterfly_factor(dimension: int, factor: int = -1) -> tuple[int, int]: - """ - m = 2k - n = 2**p - m*n = dim - """ - - # Find the first solution and check if it is even doable - m = n = 0 - while m <= factor: - m += 2 - while dimension % m != 0 and m < dimension: - m += 2 - if m > factor: - break - if sum(int(i) for i in f"{dimension//m:b}") == 1: - n = dimension // m - - if n == 0: - raise ValueError( - f"It is impossible to decompose {dimension} with factor {factor} under BOFT constrains." - ) - - #log_butterfly_factorize(dimension, factor, (dimension // n, n)) - return dimension // n, n From 90441294db16383bce6f341e8a1f67fe422172d4 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:25:09 +0800 Subject: [PATCH 08/69] Add rescale mechanism LyCORIS will support save oft_blocks instead of oft_diag in the near future (for both OFT and BOFT) But this means we need to store the rescale if user enable it. --- extensions-builtin/Lora/network_oft.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d7b317029..ed221d8fe 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -40,6 +40,7 @@ class NetworkModuleOFT(network.NetworkModule): self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True + self.rescale = weight.w.get('rescale', None) is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] @@ -108,6 +109,10 @@ class NetworkModuleOFT(network.NetworkModule): inp = rearrange(inp, "(c k g) ... 
-> (c g k) ...", g=2, k=2**i * r_b) merged_weight = inp + # Rescale mechanism + if self.rescale is not None: + merged_weight = self.rescale.to(merged_weight) * merged_weight + updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype) output_shape = orig_weight.shape return self.finalize_updown(updown, orig_weight, output_shape) From 4573195894fffeae08a94c015a94772c1a54a58d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 11:40:53 +0300 Subject: [PATCH 09/69] prevent escape button causing an interrupt when no generation has been made yet --- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 25cf0973a..f069b1ef0 100644 --- a/script.js +++ b/script.js @@ -167,7 +167,7 @@ document.addEventListener('keydown', function(e) { const lightboxModal = document.querySelector('#lightboxModal'); if (!globalPopup || globalPopup.style.display === 'none') { if (document.activeElement === lightboxModal) return; - if (interruptButton.style.display !== 'none') { + if (interruptButton.style.display === 'block') { interruptButton.click(); e.preventDefault(); } From 4ff1fabc86db927c45642704fda3472d399f3e19 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 13:21:08 +0300 Subject: [PATCH 10/69] Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation --- modules/sd_samplers_cfg_denoiser.py | 6 +++--- modules/shared_options.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 941dff4b3..a73d3b036 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -220,10 +220,10 @@ class CFGDenoiser(torch.nn.Module): self.padded_cond_uncond = False self.padded_cond_uncond_v0 = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - tensor, uncond = self.pad_cond_uncond(tensor, uncond) - elif shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: + if shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: tensor, uncond = self.pad_cond_uncond_v0(tensor, uncond) + elif shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + tensor, uncond = self.pad_cond_uncond(tensor, uncond) if tensor.shape[1] == uncond.shape[1] or skip_uncond: if is_edit_model: diff --git a/modules/shared_options.py b/modules/shared_options.py index e1d11c8e0..25b47aa19 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -211,7 +211,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd" "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; ignored 
if the above is set; changes seeds"), + "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; overrides the above if set; WARNING: truncates negative prompt if it's too long; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."), From a18e54ecd756a4101e16e42fc313df259542e07b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 18 Feb 2024 00:38:05 +0900 Subject: [PATCH 11/69] option "open image button" open the actual dir --- modules/shared_options.py | 2 ++ modules/ui_common.py | 54 +++++++++++++++++++++++++++------------ modules/ui_tempdir.py | 15 +++++++++++ 3 files changed, 54 insertions(+), 17 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 25b47aa19..7571a7d1d 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -284,6 +284,8 @@ options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { "sd_webui_modal_lightbox_icon_opacity": OptionInfo(1, "Full page image viewer: control icon unfocused opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "sd_webui_modal_lightbox_toolbar_opacity": OptionInfo(0.9, "Full page image viewer: tool bar opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(), + "button_open_image_actual_dir": OptionInfo(True, '"Open images output directory" button opens the actual directory of the image rather than the output root folder'), + "button_open_image_actual_dir_temp": OptionInfo(False, '"Open images output directory" button opens the actual directory even for temp images'), })) options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), { diff --git a/modules/ui_common.py b/modules/ui_common.py index 29fe7d0e9..78481c6fb 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -9,7 +9,7 @@ import sys import gradio as gr import subprocess as sp -from modules import call_queue, shared +from modules import call_queue, shared, ui_tempdir from modules.infotext_utils import image_from_url_text import modules.images from modules.ui_components import ToolButton @@ -164,29 +164,45 @@ class OutputPanel: def create_output_panel(tabname, outdir, toprow=None): res = OutputPanel() - def open_folder(f): + def open_folder(f, images=None, index=None): + if shared.cmd_opts.hide_ui_dir_config: + return + + try: + if shared.opts.button_open_image_actual_dir and 0 <= index < len(images): + image = images[index] + image_path = image["name"].rsplit('?', 1)[0] + image_dir = os.path.split(image_path)[0] + if 
shared.opts.button_open_image_actual_dir_temp or not ui_tempdir.is_gradio_temp_path(image_dir): + f = image_dir + except Exception: + pass + if not os.path.exists(f): - print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.') + msg = f'Folder "{f}" does not exist. After you create an image, the folder will be created.' + print(msg) + gr.Info(msg) return elif not os.path.isdir(f): - print(f""" + msg = f""" WARNING An open_folder request was made with an argument that is not a folder. This could be an error or a malicious attempt to run code on your computer. Requested path was: {f} -""", file=sys.stderr) +""" + print(msg, file=sys.stderr) + gr.Warning(msg) return - if not shared.cmd_opts.hide_ui_dir_config: - path = os.path.normpath(f) - if platform.system() == "Windows": - os.startfile(path) - elif platform.system() == "Darwin": - sp.Popen(["open", path]) - elif "microsoft-standard-WSL2" in platform.uname().release: - sp.Popen(["wsl-open", path]) - else: - sp.Popen(["xdg-open", path]) + path = os.path.normpath(f) + if platform.system() == "Windows": + os.startfile(path) + elif platform.system() == "Darwin": + sp.Popen(["open", path]) + elif "microsoft-standard-WSL2" in platform.uname().release: + sp.Popen(["wsl-open", path]) + else: + sp.Popen(["xdg-open", path]) with gr.Column(elem_id=f"{tabname}_results"): if toprow: @@ -213,8 +229,12 @@ Requested path was: {f} res.button_upscale = ToolButton('✨', elem_id=f'{tabname}_upscale', tooltip="Create an upscaled version of the current image using hires fix settings.") open_folder_button.click( - fn=lambda: open_folder(shared.opts.outdir_samples or outdir), - inputs=[], + fn=lambda images, index: open_folder(shared.opts.outdir_samples or outdir, images, index), + _js="(y, w) => [y, selected_gallery_index()]", + inputs=[ + res.gallery, + open_folder_button, # placeholder for index + ], outputs=[], ) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 91f40ea42..621ed1eca 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -81,3 +81,18 @@ def cleanup_tmpdr(): filename = os.path.join(root, name) os.remove(filename) + + +def is_gradio_temp_path(path): + """ + Check if the path is a temp dir used by gradio + """ + path = Path(path) + if shared.opts.temp_dir and path.is_relative_to(shared.opts.temp_dir): + return True + if gradio_temp_dir := os.environ.get("GRADIO_TEMP_DIR"): + if path.is_relative_to(gradio_temp_dir): + return True + if path.is_relative_to(Path(tempfile.gettempdir()) / "gradio"): + return True + return False From 71072f56204c300fa294e15eb7d07592edacda16 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 18 Feb 2024 02:47:44 +0900 Subject: [PATCH 12/69] re-work open image button settings --- modules/shared_options.py | 3 +-- modules/ui_common.py | 8 +++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 7571a7d1d..bb3752ba6 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -284,8 +284,7 @@ options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { "sd_webui_modal_lightbox_icon_opacity": OptionInfo(1, "Full page image viewer: control icon unfocused opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "sd_webui_modal_lightbox_toolbar_opacity": OptionInfo(0.9, "Full page image viewer: tool bar opacity", gr.Slider, {"minimum": 0.0, 
"maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(), - "button_open_image_actual_dir": OptionInfo(True, '"Open images output directory" button opens the actual directory of the image rather than the output root folder'), - "button_open_image_actual_dir_temp": OptionInfo(False, '"Open images output directory" button opens the actual directory even for temp images'), + "open_dir_button_choice": OptionInfo("Subdirectory", "What directory the [📂] button opens", gr.Radio, {"choices": ["Output Root", "Subdirectory", "Subdirectory (even temp dir)"]}), })) options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), { diff --git a/modules/ui_common.py b/modules/ui_common.py index 78481c6fb..cf1b8b32c 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -169,11 +169,9 @@ def create_output_panel(tabname, outdir, toprow=None): return try: - if shared.opts.button_open_image_actual_dir and 0 <= index < len(images): - image = images[index] - image_path = image["name"].rsplit('?', 1)[0] - image_dir = os.path.split(image_path)[0] - if shared.opts.button_open_image_actual_dir_temp or not ui_tempdir.is_gradio_temp_path(image_dir): + if 'Sub' in shared.opts.open_dir_button_choice: + image_dir = os.path.split(images[index]["name"].rsplit('?', 1)[0])[0] + if 'temp' in shared.opts.open_dir_button_choice or not ui_tempdir.is_gradio_temp_path(image_dir): f = image_dir except Exception: pass From 5a8dd0c549c0221cd3ee1c53816aa52cf7b3b0ae Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 18 Feb 2024 14:58:41 +0800 Subject: [PATCH 13/69] Fix rescale --- extensions-builtin/Lora/network_oft.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index ed221d8fe..f5e657b82 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -40,7 +40,9 @@ class NetworkModuleOFT(network.NetworkModule): self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True - self.rescale = weight.w.get('rescale', None) + self.rescale = weights.w.get('rescale', None) + if self.rescale is not None: + self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] From 9d5dc582be54031f3a2292105eb7dc540bcc8b0c Mon Sep 17 00:00:00 2001 From: HSIEH TSUNGYU Date: Sun, 18 Feb 2024 19:27:33 +0800 Subject: [PATCH 14/69] Error handling for unsupported transparency When input images (palette mode) have transparency (bytes) in info, the output images (RGB mode) will inherit it, causing ValueError in Pillow:PIL/PngImagePlugin.py#1364 when trying to unpack this bytes. 
This commit check the PNG mode and transparency info, removing transparency if it's RGB mode and transparency is bytes --- modules/images.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/modules/images.py b/modules/images.py index b6f2358c3..ebd3a9014 100644 --- a/modules/images.py +++ b/modules/images.py @@ -548,6 +548,12 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p else: pnginfo_data = None + # Error handling for unsupported transparency in RGB mode + if (image.mode == "RGB" and + "transparency" in image.info and + isinstance(image.info["transparency"], bytes)): + del image.info["transparency"] + image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) elif extension.lower() in (".jpg", ".jpeg", ".webp"): From 4eb949625c8cc04ba579fc5486cc10acd541596b Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:43:07 +0800 Subject: [PATCH 15/69] prevent undefined variable --- extensions-builtin/Lora/network_oft.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index f5e657b82..d658ad109 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -22,6 +22,8 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.scale = 1.0 + self.is_kohya = False + self.is_boft = False # kohya-ss if "oft_blocks" in weights.w.keys(): @@ -31,13 +33,11 @@ class NetworkModuleOFT(network.NetworkModule): self.dim = self.oft_blocks.shape[0] # lora dim # LyCORIS OFT elif "oft_diag" in weights.w.keys(): - self.is_kohya = False self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) # LyCORIS BOFT - self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True self.rescale = weights.w.get('rescale', None) From 33c8fe1221cdc53b9f00b7041b6e06cc9b0e037c Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 19 Feb 2024 16:57:49 +0400 Subject: [PATCH 16/69] avoid doble upscaling in inpaint --- modules/processing.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f4aa165de..d208a922d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -74,16 +74,18 @@ def uncrop(image, dest_size, paste_loc): def apply_overlay(image, paste_loc, overlay): if overlay is None: - return image + return image, image.copy() if paste_loc is not None: image = uncrop(image, (overlay.width, overlay.height), paste_loc) + original_denoised_image = image.copy() + image = image.convert('RGBA') image.alpha_composite(overlay) image = image.convert('RGB') - return image + return image, original_denoised_image def create_binary_mask(image, round=True): if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255): @@ -1021,7 +1023,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.color_corrections is not None and i < len(p.color_corrections): if save_samples and opts.save_images_before_color_correction: - image_without_cc = apply_overlay(image, p.paste_to, overlay_image) + image_without_cc, _ = apply_overlay(image, p.paste_to, overlay_image) images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, 
suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) @@ -1029,12 +1031,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: # that is being composited over the original image, # we need to keep the original image around # and use it in the composite step. - original_denoised_image = image.copy() - - if p.paste_to is not None: - original_denoised_image = uncrop(original_denoised_image, (overlay_image.width, overlay_image.height), p.paste_to) - - image = apply_overlay(image, p.paste_to, overlay_image) + image, original_denoised_image = apply_overlay(image, p.paste_to, overlay_image) if p.scripts is not None: pp = scripts.PostprocessImageArgs(image) From a5436a3ac0d0048a36f0652bde56ec2bc9aeb2ca Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 20 Feb 2024 17:20:14 +0800 Subject: [PATCH 17/69] Update network_oft.py --- extensions-builtin/Lora/network_oft.py | 40 ++++++++++++-------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d658ad109..5b899bd63 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -22,24 +22,24 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.scale = 1.0 - self.is_kohya = False + self.is_R = False self.is_boft = False - # kohya-ss + # kohya-ss/New LyCORIS OFT/BOFT if "oft_blocks" in weights.w.keys(): - self.is_kohya = True self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) - self.alpha = weights.w["alpha"] # alpha is constraint + self.alpha = weights.w.get("alpha", self.alpha) # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - # LyCORIS OFT + # Old LyCORIS OFT elif "oft_diag" in weights.w.keys(): + self.is_R = True self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) - # LyCORIS BOFT - if weights.w["oft_diag"].dim() == 4: - self.is_boft = True + # LyCORIS BOFT + if self.oft_blocks.dim() == 4: + self.is_boft = True self.rescale = weights.w.get('rescale', None) if self.rescale is not None: self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) @@ -55,26 +55,24 @@ class NetworkModuleOFT(network.NetworkModule): elif is_other_linear: self.out_dim = self.sd_module.embed_dim - if self.is_kohya: - self.constraint = self.alpha * self.out_dim - self.num_blocks = self.dim - self.block_size = self.out_dim // self.dim + self.num_blocks = self.dim + self.block_size = self.out_dim // self.dim + self.constraint = (1 if self.alpha is None else self.alpha) * self.out_dim + if self.is_R: + self.constraint = None + self.block_size = self.dim + self.num_blocks = self.out_dim // self.dim elif self.is_boft: - self.constraint = None - self.boft_m = weights.w["oft_diag"].shape[0] - self.block_num = weights.w["oft_diag"].shape[1] - self.block_size = weights.w["oft_diag"].shape[2] + self.boft_m = self.oft_blocks.shape[0] + self.num_blocks = self.oft_blocks.shape[1] + self.block_size = self.oft_blocks.shape[2] self.boft_b = self.block_size - #self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) - else: - self.constraint = None - self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) def calc_updown(self, orig_weight): oft_blocks = 
self.oft_blocks.to(orig_weight.device) eye = torch.eye(self.block_size, device=oft_blocks.device) - if self.is_kohya: + if not self.is_R: block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) From 591470d86d565559d79d14a66ff14ecea2bd7706 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 20 Feb 2024 17:21:34 +0800 Subject: [PATCH 18/69] linting --- extensions-builtin/Lora/network_oft.py | 1 - 1 file changed, 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 5b899bd63..f14c183ae 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,5 @@ import torch import network -from lyco_helpers import factorization from einops import rearrange From f4869f8de3ed76735ea331fe5463abc6190bd4cf Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:18:13 -0500 Subject: [PATCH 19/69] Add compatibility option for refiner switching --- modules/shared_options.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index bb3752ba6..e17eed512 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -227,7 +227,8 @@ options_templates.update(options_section(('compatibility', "Compatibility", "sd" "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), "use_old_scheduling": OptionInfo(False, "Use old prompt editing timelines.", infotext="Old prompt editing timelines").info("For [red:green:N]; old: If N < 1, it's a fraction of steps (and hires fix uses range from 0 to 1), if N >= 1, it's an absolute number of steps; new: If N has a decimal point in it, it's a fraction of steps (and hires fix uses range from 1 to 2), othewrwise it's an absolute number of steps"), - "use_downcasted_alpha_bar": OptionInfo(False, "Downcast model alphas_cumprod to fp16 before sampling. For reproducing old seeds.", infotext="Downcast alphas_cumprod") + "use_downcasted_alpha_bar": OptionInfo(False, "Downcast model alphas_cumprod to fp16 before sampling. For reproducing old seeds.", infotext="Downcast alphas_cumprod"), + "refiner_switch_by_sample_steps": OptionInfo(False, "Switch to refiner by sampling steps instead of model timesteps. 
Old behavior for refiner.", infotext="Refiner switch by sampling steps") })) options_templates.update(options_section(('interrogate', "Interrogate"), { From 09d2e5881120c4a51888633947062b40726c6fef Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:22:40 -0500 Subject: [PATCH 20/69] Pass sigma to apply_refiner --- modules/sd_samplers_cfg_denoiser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index a73d3b036..93581c9ac 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -152,7 +152,7 @@ class CFGDenoiser(torch.nn.Module): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException - if sd_samplers_common.apply_refiner(self): + if sd_samplers_common.apply_refiner(self, sigma): cond = self.sampler.sampler_extra_args['cond'] uncond = self.sampler.sampler_extra_args['uncond'] From 25eeeaa65f819bb40df427141b82b46d3fcf59e9 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:37:29 -0500 Subject: [PATCH 21/69] Allow refiner to be triggered by model timestep instead of sampling --- modules/sd_samplers_common.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 6bd38e12a..8052b021a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -156,7 +156,16 @@ replace_torchsde_browinan() def apply_refiner(cfg_denoiser): - completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps + if opts.refiner_switch_by_sample_steps: + completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps + else: + # torch.max(sigma) only to handle rare case where we might have different sigmas in the same batch + try: + timestep = torch.argmin(torch.abs(cfg_denoiser.inner_model.sigmas - torch.max(sigma))) + except AttributeError: # for samplers that dont use sigmas (DDIM) sigma is actually the timestep + timestep = torch.max(sigma).to(dtype=int) + completed_ratio = (999 - timestep) / 1000 + refiner_switch_at = cfg_denoiser.p.refiner_switch_at refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info From bf348032bc07d48ec0b4fbb5be1c4648ee8bd49b Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:59:28 -0500 Subject: [PATCH 22/69] fix missing arg --- modules/sd_samplers_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 8052b021a..045b9e2fe 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -155,7 +155,7 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() -def apply_refiner(cfg_denoiser): +def apply_refiner(cfg_denoiser, sigma): if opts.refiner_switch_by_sample_steps: completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps else: From 9c1ece89784e36a86b19f371e3b6e60bb630394e Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 19:23:21 -0500 Subject: [PATCH 23/69] Protect alphas_cumprod during refiner switchover --- modules/sd_samplers_common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 6bd38e12a..c9578ffe6 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -181,8 +181,12 @@ def 
apply_refiner(cfg_denoiser): cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at + alphas_cumprod_original = cfg_denoiser.p.sd_model.alphas_cumprod_original + alphas_cumprod = cfg_denoiser.p.sd_model.alphas_cumprod with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) + cfg_denoiser.p.sd_model.alphas_cumprod_original = alphas_cumprod_original + cfg_denoiser.p.sd_model.alphas_cumprod = alphas_cumprod devices.torch_gc() cfg_denoiser.p.setup_conds() From b7aa425344ea4f598350e94c451cb7ffd3e6630c Mon Sep 17 00:00:00 2001 From: wangshuai09 <391746016@qq.com> Date: Wed, 21 Feb 2024 11:49:06 +0800 Subject: [PATCH 24/69] del gpu_info for npu --- webui.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.sh b/webui.sh index f116376f7..be2b853b0 100755 --- a/webui.sh +++ b/webui.sh @@ -158,9 +158,9 @@ then if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch==2.0.1+rocm5.4.2 torchvision==0.15.2+rocm5.4.2 --index-url https://download.pytorch.org/whl/rocm5.4.2" - elif echo "$gpu_info" | grep -q "Huawei" && [[ -z "${TORCH_COMMAND}" ]] + elif eval "npu-smi info" then - export TORCH_COMMAND="pip install torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu; pip install torch_npu" + export TORCH_COMMAND="pip install torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu; pip install torch_npu==2.1.0" fi fi From 64179c32213f986d1378b2f414be6ef86af1a82f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Wed, 21 Feb 2024 22:50:43 +0800 Subject: [PATCH 25/69] Update network_oft.py --- extensions-builtin/Lora/network_oft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index f14c183ae..ce931c620 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -72,7 +72,7 @@ class NetworkModuleOFT(network.NetworkModule): eye = torch.eye(self.block_size, device=oft_blocks.device) if not self.is_R: - block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix + block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) From c4afdb7895a5a5224915b3c6f27f8e800e18ef41 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Thu, 22 Feb 2024 00:43:32 +0800 Subject: [PATCH 26/69] For no constraint --- extensions-builtin/Lora/network_oft.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index ce931c620..7821a8a7d 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -27,7 +27,7 @@ class NetworkModuleOFT(network.NetworkModule): # kohya-ss/New LyCORIS OFT/BOFT if "oft_blocks" in weights.w.keys(): self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) - self.alpha = weights.w.get("alpha", self.alpha) # alpha is constraint + self.alpha = weights.w.get("alpha", None) # alpha is 
constraint self.dim = self.oft_blocks.shape[0] # lora dim # Old LyCORIS OFT elif "oft_diag" in weights.w.keys(): @@ -56,7 +56,7 @@ class NetworkModuleOFT(network.NetworkModule): self.num_blocks = self.dim self.block_size = self.out_dim // self.dim - self.constraint = (1 if self.alpha is None else self.alpha) * self.out_dim + self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim if self.is_R: self.constraint = None self.block_size = self.dim @@ -73,9 +73,10 @@ class NetworkModuleOFT(network.NetworkModule): if not self.is_R: block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix - norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) - block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + if self.constraint != 0: + norm_Q = torch.norm(block_Q.flatten()) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) + block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse()) R = oft_blocks.to(orig_weight.device) From f537e5a519d080fd2b16d94d91e7fed8dd3fd680 Mon Sep 17 00:00:00 2001 From: dtlnor Date: Thu, 22 Feb 2024 12:26:57 +0900 Subject: [PATCH 27/69] fix #14591 - using translated content to do categories mapping --- javascript/settings.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/settings.js b/javascript/settings.js index e6009290a..b2d981c21 100644 --- a/javascript/settings.js +++ b/javascript/settings.js @@ -55,8 +55,8 @@ onOptionsChanged(function() { }); opts._categories.forEach(function(x) { - var section = x[0]; - var category = x[1]; + var section = localization[x[0]] ?? x[0]; + var category = localization[x[1]] ?? x[1]; var span = document.createElement('SPAN'); span.textContent = category; From 1da05297ea1850c6df5ef1f3d6a487d4bb4c50dd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 10:27:38 +0300 Subject: [PATCH 28/69] possible fix for reload button not appearing in some cases for extra networks. --- modules/ui_extra_networks.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index c03b9f081..6874a0244 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -472,7 +472,7 @@ class ExtraNetworksPage: return f"
    {res}
" - def create_card_view_html(self, tabname: str) -> str: + def create_card_view_html(self, tabname: str, *, none_message) -> str: """Generates HTML for the network Card View section for a tab. This HTML goes into the `extra-networks-pane.html`
with @@ -480,6 +480,7 @@ class ExtraNetworksPage: Args: tabname: The name of the active tab. + none_message: HTML text to show when there are no cards. Returns: HTML formatted string. @@ -490,24 +491,28 @@ class ExtraNetworksPage: if res == "": dirs = "".join([f"
  • {x}
  • " for x in self.allowed_directories_for_previews()]) - res = shared.html("extra-networks-no-cards.html").format(dirs=dirs) + res = none_message or shared.html("extra-networks-no-cards.html").format(dirs=dirs) return res - def create_html(self, tabname): + def create_html(self, tabname, *, empty=False): """Generates an HTML string for the current pane. The generated HTML uses `extra-networks-pane.html` as a template. Args: tabname: The name of the active tab. + empty: create an empty HTML page with no items Returns: HTML formatted string. """ self.lister.reset() self.metadata = {} - self.items = {x["name"]: x for x in self.list_items()} + + items_list = [] if empty else self.list_items() + self.items = {x["name"]: x for x in items_list} + # Populate the instance metadata for each item. for item in self.items.values(): metadata = item.get("metadata") @@ -536,7 +541,7 @@ class ExtraNetworksPage: "tree_view_btn_extra_class": tree_view_btn_extra_class, "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), - "items_html": self.create_card_view_html(tabname), + "items_html": self.create_card_view_html(tabname, none_message="Loading..." if empty else None), } ) @@ -655,7 +660,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): pass elem_id = f"{tabname}_{page.extra_networks_tabname}_cards_html" - page_elem = gr.HTML('Loading...', elem_id=elem_id) + page_elem = gr.HTML(page.create_html(tabname, empty=True), elem_id=elem_id) ui.pages.append(page_elem) editor = page.create_user_metadata_editor(ui, tabname) editor.create_ui() From ba66cf8d69b770b171a42ae996a466aceaaf7ca3 Mon Sep 17 00:00:00 2001 From: wangshuai09 <391746016@qq.com> Date: Thu, 22 Feb 2024 20:17:10 +0800 Subject: [PATCH 29/69] update --- modules/hypernetworks/hypernetwork.py | 1 + modules/sd_hijack_clip.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index be3e46484..6082d9cb3 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -95,6 +95,7 @@ class HypernetworkModule(torch.nn.Module): zeros_(b) else: raise KeyError(f"Key {weight_init} is not defined as initialization!") + devices.torch_npu_set_device() self.to(devices.device) def fix_old_state_dict(self, state_dict): diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 98350ac43..228969dce 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -230,7 +230,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): for fixes in self.hijack.fixes: for _position, embedding in fixes: used_embeddings[embedding.name] = embedding - + devices.torch_npu_set_device() z = self.process_tokens(tokens, multipliers) zs.append(z) From 85abbbb8fa8f983222e7fffec1e686c06cf4deae Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 22 Feb 2024 17:04:56 +0400 Subject: [PATCH 30/69] support resizable columns for touch (tablets) --- javascript/resizeHandle.js | 86 +++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 30 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 8c5c51692..13f2b3719 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -65,21 +65,31 @@ resizeHandle.classList.add('resize-handle'); parent.insertBefore(resizeHandle, rightCol); - resizeHandle.addEventListener('mousedown', (evt) => { - if (evt.button !== 0) return; + ['mousedown', 
'touchstart'].forEach((eventType) => { + resizeHandle.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')){ + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - evt.preventDefault(); - evt.stopPropagation(); + evt.preventDefault(); + evt.stopPropagation(); - document.body.classList.add('resizing'); + document.body.classList.add('resizing'); - R.tracking = true; - R.parent = parent; - R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; - R.leftCol = leftCol; - R.leftColStartWidth = leftCol.offsetWidth; - R.screenX = evt.screenX; + R.tracking = true; + R.parent = parent; + R.parentWidth = parent.offsetWidth; + R.handle = resizeHandle; + R.leftCol = leftCol; + R.leftColStartWidth = leftCol.offsetWidth; + if (eventType.startsWith('mouse')){ + R.screenX = evt.screenX; + } else { + R.screenX = evt.changedTouches[0].screenX; + } + }); }); resizeHandle.addEventListener('dblclick', (evt) => { @@ -92,30 +102,46 @@ afterResize(parent); } - window.addEventListener('mousemove', (evt) => { - if (evt.button !== 0) return; + ['mousemove', 'touchmove'].forEach((eventType) => { + window.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')){ + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - if (R.tracking) { - evt.preventDefault(); - evt.stopPropagation(); - - const delta = R.screenX - evt.screenX; - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); - setLeftColGridTemplate(R.parent, leftColWidth); - } + if (R.tracking) { + evt.preventDefault(); + evt.stopPropagation(); + + if (eventType.startsWith('mouse')){ + var delta = R.screenX - evt.screenX; + } else { + var delta = R.screenX - evt.changedTouches[0].screenX; + } + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); + setLeftColGridTemplate(R.parent, leftColWidth); + } + }); }); - window.addEventListener('mouseup', (evt) => { - if (evt.button !== 0) return; + ['mouseup', 'touchend'].forEach((eventType) => { + window.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')){ + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - if (R.tracking) { - evt.preventDefault(); - evt.stopPropagation(); + if (R.tracking) { + evt.preventDefault(); + evt.stopPropagation(); - R.tracking = false; + R.tracking = false; - document.body.classList.remove('resizing'); - } + document.body.classList.remove('resizing'); + } + }); }); From ab1e0fa9bff196b4fd6f4eef560218833e6bb387 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 22 Feb 2024 17:16:16 +0400 Subject: [PATCH 31/69] fix lint and console warning --- javascript/resizeHandle.js | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 13f2b3719..038f4cb06 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -67,7 +67,7 @@ ['mousedown', 'touchstart'].forEach((eventType) => { resizeHandle.addEventListener(eventType, (evt) => { - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; @@ -84,7 +84,7 @@ R.handle = resizeHandle; R.leftCol = leftCol; R.leftColStartWidth = leftCol.offsetWidth; - if (eventType.startsWith('mouse')){ + if 
(eventType.startsWith('mouse')) { R.screenX = evt.screenX; } else { R.screenX = evt.changedTouches[0].screenX; @@ -104,20 +104,23 @@ ['mousemove', 'touchmove'].forEach((eventType) => { window.addEventListener(eventType, (evt) => { - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; } if (R.tracking) { - evt.preventDefault(); + if (eventType.startsWith('mouse')) { + evt.preventDefault(); + } evt.stopPropagation(); - if (eventType.startsWith('mouse')){ - var delta = R.screenX - evt.screenX; + let delta = 0; + if (eventType.startsWith('mouse')) { + delta = R.screenX - evt.screenX; } else { - var delta = R.screenX - evt.changedTouches[0].screenX; + delta = R.screenX - evt.changedTouches[0].screenX; } const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); setLeftColGridTemplate(R.parent, leftColWidth); @@ -127,7 +130,7 @@ ['mouseup', 'touchend'].forEach((eventType) => { window.addEventListener(eventType, (evt) => { - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; From 58985e6b372de408150fcd2dbcd6c6d5a17a3f58 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 22 Feb 2024 17:22:00 +0400 Subject: [PATCH 32/69] fix lint 2 --- javascript/resizeHandle.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 038f4cb06..f22aa51de 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -115,7 +115,7 @@ evt.preventDefault(); } evt.stopPropagation(); - + let delta = 0; if (eventType.startsWith('mouse')) { delta = R.screenX - evt.screenX; From 3f18a09c8638cfd69848a9f39d1841848b57d036 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 21:27:10 +0300 Subject: [PATCH 33/69] make extra network card description plaintext by default, with an option to re-enable HTML as it was --- modules/shared_options.py | 1 + modules/ui_extra_networks.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index bb3752ba6..64f8f1967 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -254,6 +254,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_card_description_is_html": OptionInfo(False, "Treat card description as HTML"), "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 6874a0244..34c46ed40 100644 --- 
a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -289,12 +289,16 @@ class ExtraNetworksPage: } ) + description = (item.get("description", "") or "" if shared.opts.extra_networks_card_show_desc else "") + if not shared.opts.extra_networks_card_description_is_html: + description = html.escape(description) + # Some items here might not be used depending on HTML template used. args = { "background_image": background_image, "card_clicked": onclick, "copy_path_button": btn_copy_path, - "description": (item.get("description", "") or "" if shared.opts.extra_networks_card_show_desc else ""), + "description": description, "edit_button": btn_edit_item, "local_preview": quote_js(item["local_preview"]), "metadata_button": btn_metadata, From 9211febbfc9ce45bdd2dc33e73939d67924c3f1e Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 23 Feb 2024 02:20:42 +0400 Subject: [PATCH 34/69] ResizeHandleRow - allow overriden column scale parametr --- javascript/resizeHandle.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index f22aa51de..cd3e68c6c 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -1,6 +1,5 @@ (function() { const GRADIO_MIN_WIDTH = 320; - const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr'; const PAD = 16; const DEBOUNCE_TIME = 100; @@ -37,7 +36,7 @@ } function afterResize(parent) { - if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) { + if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != parent.style.originalGridTemplateColumns) { const oldParentWidth = R.parentWidth; const newParentWidth = parent.offsetWidth; const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]); @@ -59,7 +58,9 @@ parent.style.display = 'grid'; parent.style.gap = '0'; - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + const gridTemplateColumns = `${parent.children[0].style.flexGrow}fr ${PAD}px ${parent.children[1].style.flexGrow}fr`; + parent.style.gridTemplateColumns = gridTemplateColumns; + parent.style.originalGridTemplateColumns = gridTemplateColumns; const resizeHandle = document.createElement('div'); resizeHandle.classList.add('resize-handle'); @@ -96,7 +97,7 @@ evt.preventDefault(); evt.stopPropagation(); - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; }); afterResize(parent); From ed594d7ba69cf065222348f5aabc0374525d8ad5 Mon Sep 17 00:00:00 2001 From: DB Eriospermum Date: Fri, 23 Feb 2024 13:37:37 +0800 Subject: [PATCH 35/69] fix: the `split_threshold` parameter does not work when running Split oversized images --- scripts/postprocessing_split_oversized.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postprocessing_split_oversized.py b/scripts/postprocessing_split_oversized.py index c4a03160f..133e199b8 100644 --- a/scripts/postprocessing_split_oversized.py +++ b/scripts/postprocessing_split_oversized.py @@ -61,7 +61,7 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces ratio = (pp.image.height * width) / (pp.image.width * height) inverse_xy = True - if ratio >= 1.0 and ratio > split_threshold: + if ratio >= 1.0 or ratio > split_threshold: return result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio) From bab918f049dd42f53eebc241ad27607ca63cc57b Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 23 Feb 2024 18:34:24 +0400 Subject: [PATCH 
36/69] fix resize-handle for vertical layout --- javascript/resizeHandle.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index f22aa51de..a3164b4ff 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -23,12 +23,14 @@ function displayResizeHandle(parent) { if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; + parent.querySelector('.resize-handle').style.display = "none"; if (R.handle != null) { R.handle.style.opacity = '0'; } return false; } else { parent.style.display = 'grid'; + parent.querySelector('.resize-handle').style.display = 'block'; if (R.handle != null) { R.handle.style.opacity = '100'; } From 3a99824638027ff84cf6c4af3421741cc091e617 Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 23 Feb 2024 20:26:56 +0400 Subject: [PATCH 37/69] register_tmp_file also with mtime --- modules/ui_tempdir.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 621ed1eca..ecd6bdec3 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -35,7 +35,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - return f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + filename_with_mtime = f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + register_tmp_file(shared.demo, filename_with_mtime) + return filename_with_mtime if shared.opts.temp_dir != "": dir = shared.opts.temp_dir From 648f6a8e0cdf5881cbec9697792e6294c54422d4 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Sun, 25 Feb 2024 23:28:36 -0500 Subject: [PATCH 38/69] dont need to preserve alphas_cumprod_original --- modules/sd_samplers_common.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index c9578ffe6..7ab1bf65a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -181,11 +181,9 @@ def apply_refiner(cfg_denoiser): cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at - alphas_cumprod_original = cfg_denoiser.p.sd_model.alphas_cumprod_original alphas_cumprod = cfg_denoiser.p.sd_model.alphas_cumprod with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) - cfg_denoiser.p.sd_model.alphas_cumprod_original = alphas_cumprod_original cfg_denoiser.p.sd_model.alphas_cumprod = alphas_cumprod devices.torch_gc() From 6e6cc2922d39fff4029d47c316c22a1c152680ce Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 26 Feb 2024 13:37:29 +0400 Subject: [PATCH 39/69] fix resize handle --- javascript/resizeHandle.js | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index a3164b4ff..ce67ca672 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -23,17 +23,11 @@ function displayResizeHandle(parent) { if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; - parent.querySelector('.resize-handle').style.display = "none"; - if (R.handle != null) { - R.handle.style.opacity = '0'; - } + parent.resizeHandle.style.display = "none"; return false; } else { 
parent.style.display = 'grid'; - parent.querySelector('.resize-handle').style.display = 'block'; - if (R.handle != null) { - R.handle.style.opacity = '100'; - } + parent.resizeHandle.style.display = "block"; return true; } } @@ -66,6 +60,7 @@ const resizeHandle = document.createElement('div'); resizeHandle.classList.add('resize-handle'); parent.insertBefore(resizeHandle, rightCol); + parent.resizeHandle = resizeHandle; ['mousedown', 'touchstart'].forEach((eventType) => { resizeHandle.addEventListener(eventType, (evt) => { @@ -83,7 +78,6 @@ R.tracking = true; R.parent = parent; R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; R.leftCol = leftCol; R.leftColStartWidth = leftCol.offsetWidth; if (eventType.startsWith('mouse')) { From dd4b0b95d5a59fa96759e5eb3937c9d268ebc2b9 Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 26 Feb 2024 16:30:15 +0400 Subject: [PATCH 40/69] cmd args: allow unix filenames and filenames max length --- modules/cmd_args.py | 4 +++- modules/images.py | 7 +++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 312dabffc..be7a59873 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -120,4 +120,6 @@ parser.add_argument('--api-server-stop', action='store_true', help='enable serve parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False) parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False) -parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui", ) +parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui") +parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system") +parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. If you override it, it can conflict with your file system') diff --git a/modules/images.py b/modules/images.py index b6f2358c3..e7d111723 100644 --- a/modules/images.py +++ b/modules/images.py @@ -321,13 +321,16 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None): return res -invalid_filename_chars = '#<>:"/\\|?*\n\r\t' +if not shared.cmd_opts.unix_filenames_sanitization: + invalid_filename_chars = '#<>:"/\\|?*\n\r\t' +else: + invalid_filename_chars = '/' invalid_filename_prefix = ' ' invalid_filename_postfix = ' .' 
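# The constants above are what a sanitiser typically consumes when turning prompt text into a
# file name part. A minimal, self-contained sketch of that flow (clean_part and the hard-coded
# values below are illustrative stand-ins, not this module's actual helper or the
# shared.cmd_opts plumbing):
unix_sanitization = False                 # stand-in for --unix-filenames-sanitization
max_part_length = 128                     # stand-in for --filenames-max-length
chars_to_strip = '/' if unix_sanitization else '#<>:"/\\|?*\n\r\t'

def clean_part(text: str) -> str:
    for ch in chars_to_strip:             # drop characters the target filesystem rejects
        text = text.replace(ch, '_')
    return text.lstrip(' ').rstrip(' .')[:max_part_length]   # trim edges, cap the length

print(clean_part('a:very*long "prompt"/with\todd characters  '))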
re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") -max_filename_part_length = 128 +max_filename_part_length = shared.cmd_opts.filenames_max_length NOTHING_AND_SKIP_PREVIOUS_TEXT = object() From 3a618e3d24394aef0f8682ded713ef1b6c265553 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 26 Feb 2024 12:44:57 -0500 Subject: [PATCH 41/69] Fix normalized filepath, resolve -> absolute https://github.com/lllyasviel/stable-diffusion-webui-forge/issues/313 https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/14942#discussioncomment-8550050 --- modules/paths_internal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 2ed1392a4..6058b0cde 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -7,7 +7,7 @@ import shlex from pathlib import Path -normalized_filepath = lambda filepath: str(Path(filepath).resolve()) +normalized_filepath = lambda filepath: str(Path(filepath).absolute()) commandline_args = os.environ.get('COMMANDLINE_ARGS', "") sys.argv += shlex.split(commandline_args) From e2cd92ea230801ecc5fc7ed90e14ab55c946fb4a Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:43:27 -0500 Subject: [PATCH 42/69] move refiner fix to sd_models.py --- modules/sd_models.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 2c0457715..fbd53adba 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,6 +15,7 @@ from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches from modules.timer import Timer +from modules.shared import opts import tomesd import numpy as np @@ -549,6 +550,36 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) +def apply_alpha_schedule_override(sd_model, p=None): + def rescale_zero_terminal_snr_abar(alphas_cumprod): + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. 
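# Quick numerical sanity check of what the rescaling above is meant to achieve: the first
# alpha-bar stays where it was and the last one is pushed to (almost) zero, i.e. zero terminal
# SNR. The beta range is a toy stand-in for a typical SD-style linear schedule, and the helper
# is assumed to be callable on its own (a later patch in this series does lift it to module level).
import torch

betas = torch.linspace(0.00085, 0.012, 1000)
abar = torch.cumprod(1.0 - betas, dim=0)
rescaled = rescale_zero_terminal_snr_abar(abar.clone())

print(abar[0].item(), rescaled[0].item())    # first timestep: effectively unchanged
print(abar[-1].item(), rescaled[-1].item())  # last timestep: small but nonzero before, ~5e-8 after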
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return alphas_bar + + if hasattr(sd_model, 'alphas_cumprod') and hasattr(sd_model, 'alphas_cumprod_original'): + sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) + + if opts.use_downcasted_alpha_bar: + if p is not None: + p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar + sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) + if opts.sd_noise_schedule == "Zero Terminal SNR": + if p is not None: + p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule + sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' @@ -812,6 +843,7 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False): sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer) if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: + apply_alpha_schedule_override(sd_model) return sd_model if sd_model is not None: From 94f23d00a76e7988f4b73ced1fa2922801e893fb Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:44:58 -0500 Subject: [PATCH 43/69] move alphas cumprod override out of processing --- modules/processing.py | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index d208a922d..411c7c3f4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -915,33 +915,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" - def rescale_zero_terminal_snr_abar(alphas_cumprod): - alphas_bar_sqrt = alphas_cumprod.sqrt() - - # Store old values. - alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() - alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() - - # Shift so the last timestep is zero. - alphas_bar_sqrt -= (alphas_bar_sqrt_T) - - # Scale so the first timestep is back to the old value. 
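# Aside on the "Downcast alphas_cumprod" option that this relocated block also handles: casting
# the schedule to fp16 nudges every entry slightly, which is why the option ends up recorded in
# the generation parameters. A tiny illustration with a toy schedule (not the model's real one):
import torch

abar = torch.linspace(0.9991, 0.0007, 1000, dtype=torch.float32)
print((abar - abar.half().float()).abs().max())   # > 0: the half-precision copy differs a little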
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) - - # Convert alphas_bar_sqrt to betas - alphas_bar = alphas_bar_sqrt**2 # Revert sqrt - alphas_bar[-1] = 4.8973451890853435e-08 - return alphas_bar - - if hasattr(p.sd_model, 'alphas_cumprod') and hasattr(p.sd_model, 'alphas_cumprod_original'): - p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod_original.to(shared.device) - - if opts.use_downcasted_alpha_bar: - p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar - p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod.half().to(shared.device) - if opts.sd_noise_schedule == "Zero Terminal SNR": - p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule - p.sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(p.sd_model.alphas_cumprod).to(shared.device) + sd_models.apply_alpha_schedule_override(p.sd_model, p) with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) From 4dae91a1fe960ad9a9774f8f5407ef67c1a109f9 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:46:10 -0500 Subject: [PATCH 44/69] remove alphas cumprod fix from samplers_common --- modules/sd_samplers_common.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 7ab1bf65a..6bd38e12a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -181,10 +181,8 @@ def apply_refiner(cfg_denoiser): cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at - alphas_cumprod = cfg_denoiser.p.sd_model.alphas_cumprod with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) - cfg_denoiser.p.sd_model.alphas_cumprod = alphas_cumprod devices.torch_gc() cfg_denoiser.p.setup_conds() From 3ba575216a8e7df307562ba8bc68a8717798daef Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 15:10:51 +0400 Subject: [PATCH 45/69] dat cmd flag --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 312dabffc..213cba98c 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -53,6 +53,7 @@ parser.add_argument("--gfpgan-models-path", type=normalized_filepath, help="Path parser.add_argument("--esrgan-models-path", type=normalized_filepath, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) parser.add_argument("--bsrgan-models-path", type=normalized_filepath, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) parser.add_argument("--realesrgan-models-path", type=normalized_filepath, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) +parser.add_argument("--dat-models-path", type=normalized_filepath, help="Path to directory with DAT model file(s).", default=os.path.join(models_path, 'DAT')) parser.add_argument("--clip-models-path", type=normalized_filepath, help="Path to directory with CLIP model file(s).", default=None) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--force-enable-xformers", 
action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") From 44bce3c74ee745b9776d965e02ae006e6b4fe3fb Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 18:31:36 +0400 Subject: [PATCH 46/69] resize handle for extra networks --- html/extra-networks-pane.html | 6 +++--- javascript/extraNetworks.js | 19 ++++++++++++++++++- javascript/resizeHandle.js | 22 +++++++++++++++++++--- modules/shared_options.py | 1 + modules/ui_extra_networks.py | 9 ++++++++- style.css | 3 ++- 6 files changed, 51 insertions(+), 9 deletions(-) diff --git a/html/extra-networks-pane.html b/html/extra-networks-pane.html index 0c763f710..f54344aaa 100644 --- a/html/extra-networks-pane.html +++ b/html/extra-networks-pane.html @@ -44,11 +44,11 @@
    -
    -
    +
    +
    {tree_html}
    -
    +
    {items_html}
    diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index d5855fe96..ff808d7aa 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -447,7 +447,24 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn * @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc. * @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc. */ - gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_tree").classList.toggle("hidden"); + const tree = gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_tree"); + const parent = tree.parentElement; + let resizeHandle = parent.querySelector('.resize-handle'); + tree.classList.toggle("hidden"); + + if (tree.classList.contains("hidden")){ + tree.style.display = 'none'; + resizeHandle.style.display = 'none'; + parent.style.display = 'flex'; + } else { + tree.style.display = 'block'; + if (!resizeHandle) { + setupResizeHandle(parent); + resizeHandle = parent.querySelector('.resize-handle'); + } + resizeHandle.style.display = 'block'; + parent.style.display = 'grid'; + } event.currentTarget.classList.toggle("extra-network-control--enabled"); } diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 6560372cc..5fb5dd4f3 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -39,7 +39,7 @@ const ratio = newParentWidth / oldParentWidth; - const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH); + const newWidthL = Math.max(Math.floor(ratio * widthL), parent.minLeftColWidth); setLeftColGridTemplate(parent, newWidthL); R.parentWidth = newParentWidth; @@ -54,7 +54,15 @@ parent.style.display = 'grid'; parent.style.gap = '0'; - const gridTemplateColumns = `${parent.children[0].style.flexGrow}fr ${PAD}px ${parent.children[1].style.flexGrow}fr`; + let leftColTemplate = ""; + if (parent.children[0].style.flexGrow) { + leftColTemplate = `${parent.children[0].style.flexGrow}fr`; + parent.minLeftColWidth = GRADIO_MIN_WIDTH; + } else { + leftColTemplate = parent.children[0].style.flexBasis; + parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2); + } + const gridTemplateColumns = `${leftColTemplate} ${PAD}px ${parent.children[1].style.flexGrow}fr`; parent.style.gridTemplateColumns = gridTemplateColumns; parent.style.originalGridTemplateColumns = gridTemplateColumns; @@ -119,7 +127,7 @@ } else { delta = R.screenX - evt.changedTouches[0].screenX; } - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), R.parent.minLeftColWidth); setLeftColGridTemplate(R.parent, leftColWidth); } }); @@ -165,3 +173,11 @@ onUiLoaded(function() { } } }); + +function setupExtraNetworksResizeHandle() { + for (var elem of document.body.querySelectorAll('.resize-handle-row')) { + if (!elem.querySelector('.resize-handle') && !elem.children[0].classList.contains("hidden")) { + setupResizeHandle(elem); + } + } +} \ No newline at end of file diff --git a/modules/shared_options.py b/modules/shared_options.py index 64f8f1967..285c54158 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -258,6 +258,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_order_field": OptionInfo("Path", "Default order 
field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), + "extra_networks_tree_view_min_width": OptionInfo(180, "Minimal width for the Extra Networks directory tree view", gr.Number).needs_reload_ui(), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 34c46ed40..09705a98c 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -531,9 +531,13 @@ class ExtraNetworksPage: data_sortkey = f"{data_sortmode}-{data_sortdir}-{len(self.items)}" tree_view_btn_extra_class = "" tree_view_div_extra_class = "hidden" + tree_view_div_default_display = "none" + extra_network_pane_content_default_display = "flex" if shared.opts.extra_networks_tree_view_default_enabled: tree_view_btn_extra_class = "extra-network-control--enabled" tree_view_div_extra_class = "" + tree_view_div_default_display = "block" + extra_network_pane_content_default_display = "grid" return self.pane_tpl.format( **{ @@ -546,6 +550,9 @@ class ExtraNetworksPage: "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), "items_html": self.create_card_view_html(tabname, none_message="Loading..." 
if empty else None), + "extra_networks_tree_view_min_width": shared.opts.extra_networks_tree_view_min_width, + "tree_view_div_default_display": tree_view_div_default_display, + "extra_network_pane_content_default_display": extra_network_pane_content_default_display, } ) @@ -703,7 +710,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): create_html() return ui.pages_contents - interface.load(fn=pages_html, inputs=[], outputs=ui.pages) + interface.load(fn=pages_html, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js='setupExtraNetworksResizeHandle') return ui diff --git a/style.css b/style.css index 8ce78ff0c..004038f89 100644 --- a/style.css +++ b/style.css @@ -1615,9 +1615,10 @@ body.resizing .resize-handle { display: inline-flex; visibility: hidden; color: var(--button-secondary-text-color); - + width: 0; } .extra-network-tree .tree-list-content:hover .button-row { visibility: visible; + width: auto; } From de7604fa77180ac8d51da4f8a59c5a27bbe25cdc Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 18:38:38 +0400 Subject: [PATCH 47/69] lint --- javascript/extraNetworks.js | 2 +- javascript/resizeHandle.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ff808d7aa..4e30261b8 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -452,7 +452,7 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn let resizeHandle = parent.querySelector('.resize-handle'); tree.classList.toggle("hidden"); - if (tree.classList.contains("hidden")){ + if (tree.classList.contains("hidden")) { tree.style.display = 'none'; resizeHandle.style.display = 'none'; parent.style.display = 'flex'; diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 5fb5dd4f3..cf2c778bb 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -180,4 +180,4 @@ function setupExtraNetworksResizeHandle() { setupResizeHandle(elem); } } -} \ No newline at end of file +} From b4c44e659ba3931d4bee0a0061c674e594cc639f Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 23:17:52 +0400 Subject: [PATCH 48/69] fix on reload with changed show all loras setting --- javascript/extraNetworks.js | 6 ++++-- javascript/resizeHandle.js | 15 ++++++--------- modules/ui_extra_networks.py | 4 ++-- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 4e30261b8..1610698bf 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -454,16 +454,18 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn if (tree.classList.contains("hidden")) { tree.style.display = 'none'; - resizeHandle.style.display = 'none'; parent.style.display = 'flex'; + if (resizeHandle) { + resizeHandle.style.display = 'none'; + } } else { tree.style.display = 'block'; + parent.style.display = 'grid'; if (!resizeHandle) { setupResizeHandle(parent); resizeHandle = parent.querySelector('.resize-handle'); } resizeHandle.style.display = 'block'; - parent.style.display = 'grid'; } event.currentTarget.classList.toggle("extra-network-control--enabled"); } diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index cf2c778bb..94ae4aaa2 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -166,18 +166,15 @@ setupResizeHandle = setup; })(); -onUiLoaded(function() { - for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) { 
- if (!elem.querySelector('.resize-handle')) { - setupResizeHandle(elem); - } - } -}); -function setupExtraNetworksResizeHandle() { - for (var elem of document.body.querySelectorAll('.resize-handle-row')) { +function setupAllResizeHandles() { + for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) { if (!elem.querySelector('.resize-handle') && !elem.children[0].classList.contains("hidden")) { setupResizeHandle(elem); } } } + + +onUiLoaded(setupAllResizeHandles); + diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 09705a98c..9d8f8b28b 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -700,7 +700,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): return ui.pages_contents button_refresh = gr.Button("Refresh", elem_id=f"{tabname}_{page.extra_networks_tabname}_extra_refresh_internal", visible=False) - button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js="function(){ " + f"applyExtraNetworkFilter('{tabname}_{page.extra_networks_tabname}');" + " }") + button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js="function(){ " + f"applyExtraNetworkFilter('{tabname}_{page.extra_networks_tabname}');" + " }").then(fn=lambda: None, _js='setupAllResizeHandles') def create_html(): ui.pages_contents = [pg.create_html(ui.tabname) for pg in ui.stored_extra_pages] @@ -710,7 +710,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): create_html() return ui.pages_contents - interface.load(fn=pages_html, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js='setupExtraNetworksResizeHandle') + interface.load(fn=pages_html, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js='setupAllResizeHandles') return ui From 51cc1ff2c9d47e66221c7abfb244e4d058c8b279 Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 23:31:47 +0400 Subject: [PATCH 49/69] fix for mobile and allow collapse right column --- javascript/resizeHandle.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 94ae4aaa2..4fe9cbdff 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -20,6 +20,9 @@ } function displayResizeHandle(parent) { + if (!parent.needHideOnMoblie) { + return true; + } if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; parent.resizeHandle.style.display = "none"; @@ -58,9 +61,13 @@ if (parent.children[0].style.flexGrow) { leftColTemplate = `${parent.children[0].style.flexGrow}fr`; parent.minLeftColWidth = GRADIO_MIN_WIDTH; + parent.minRightColWidth = GRADIO_MIN_WIDTH; + parent.needHideOnMoblie = true; } else { leftColTemplate = parent.children[0].style.flexBasis; parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2); + parent.minRightColWidth = 0; + parent.needHideOnMoblie = false; } const gridTemplateColumns = `${leftColTemplate} ${PAD}px ${parent.children[1].style.flexGrow}fr`; parent.style.gridTemplateColumns = gridTemplateColumns; @@ -127,7 +134,7 @@ } else { delta = R.screenX - evt.changedTouches[0].screenX; } - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), R.parent.minLeftColWidth); + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - R.parent.minRightColWidth - PAD), R.parent.minLeftColWidth); setLeftColGridTemplate(R.parent, leftColWidth); } }); From 
bce09eb9871e08fda07b8d6ff78d4d19307574db Mon Sep 17 00:00:00 2001 From: Dalton Date: Thu, 29 Feb 2024 01:04:46 -0500 Subject: [PATCH 50/69] Add a direct link to the binary release --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 29506f249..d1a086ad6 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -56,7 +56,7 @@ and delete current Python and "venv" folder in WebUI's directory. You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/ -{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} +{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre" if is_windows else ""} Use --skip-python-version-check to suppress this warning. """) From bb99f5271241565bfd98d2a1fdba59350a5aeb39 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 29 Feb 2024 15:40:15 +0400 Subject: [PATCH 51/69] resizeHandle handle double tap --- javascript/resizeHandle.js | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 6560372cc..c4e9de581 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -2,6 +2,7 @@ const GRADIO_MIN_WIDTH = 320; const PAD = 16; const DEBOUNCE_TIME = 100; + const DOUBLE_TAP_DELAY = 200; //ms const R = { tracking: false, @@ -10,6 +11,7 @@ leftCol: null, leftColStartWidth: null, screenX: null, + lastTapTime: null, }; let resizeTimer; @@ -47,6 +49,14 @@ } function setup(parent) { + + function onDoubleClick(evt) { + evt.preventDefault(); + evt.stopPropagation(); + + parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; + } + const leftCol = parent.firstElementChild; const rightCol = parent.lastElementChild; @@ -69,6 +79,14 @@ if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; + + const currentTime = new Date().getTime(); + if (R.lastTapTime && currentTime - R.lastTapTime <= DOUBLE_TAP_DELAY) { + onDoubleClick(evt); + return; + } + + R.lastTapTime = currentTime; } evt.preventDefault(); @@ -89,12 +107,7 @@ }); }); - resizeHandle.addEventListener('dblclick', (evt) => { - evt.preventDefault(); - evt.stopPropagation(); - - parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; - }); + resizeHandle.addEventListener('dblclick', onDoubleClick); afterResize(parent); } From eb0b84c5643896385ba6dd242c6815b288618355 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 29 Feb 2024 16:02:21 +0400 Subject: [PATCH 52/69] make minimal width 2 times smaller then default --- html/extra-networks-pane.html | 2 +- javascript/resizeHandle.js | 2 +- modules/shared_options.py | 2 +- modules/ui_extra_networks.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/html/extra-networks-pane.html b/html/extra-networks-pane.html index f54344aaa..02a871086 100644 --- a/html/extra-networks-pane.html +++ b/html/extra-networks-pane.html @@ -45,7 +45,7 @@
    -
    +
    {tree_html}
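The left (tree) column now advertises its default width through its flex-basis, and the resize
handle lets it be dragged down to half of that instead of the global Gradio minimum. In rough
Python terms, mirroring the JS `slice(0, -2) / 2` (the "180px" value is just the option's default):

flex_basis = "180px"                  # what the pane emits for the tree column
default_width = int(flex_basis[:-2])  # strip the trailing "px" -> 180
min_width = default_width / 2         # the column may shrink to half its default -> 90.0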
    diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 4fe9cbdff..513198f53 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -65,7 +65,7 @@ parent.needHideOnMoblie = true; } else { leftColTemplate = parent.children[0].style.flexBasis; - parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2); + parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2) / 2; parent.minRightColWidth = 0; parent.needHideOnMoblie = false; } diff --git a/modules/shared_options.py b/modules/shared_options.py index 285c54158..aa26588df 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -258,7 +258,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), - "extra_networks_tree_view_min_width": OptionInfo(180, "Minimal width for the Extra Networks directory tree view", gr.Number).needs_reload_ui(), + "extra_networks_tree_view_default_width": OptionInfo(180, "Default width for the Extra Networks directory tree view", gr.Number).needs_reload_ui(), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 9d8f8b28b..ad2c23054 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -550,7 +550,7 @@ class ExtraNetworksPage: "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), "items_html": self.create_card_view_html(tabname, none_message="Loading..." 
if empty else None), - "extra_networks_tree_view_min_width": shared.opts.extra_networks_tree_view_min_width, + "extra_networks_tree_view_default_width": shared.opts.extra_networks_tree_view_default_width, "tree_view_div_default_display": tree_view_div_default_display, "extra_network_pane_content_default_display": extra_network_pane_content_default_display, } From 1a51b166a04245f5e2ccdfc1300be3be79345bc3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:53:53 +0300 Subject: [PATCH 53/69] call apply_alpha_schedule_override in load_model_weights for #14979 --- modules/sd_models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index fbd53adba..db72e120f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -428,6 +428,8 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer devices.dtype_unet = torch.float16 timer.record("apply half()") + apply_alpha_schedule_override(model) + for module in model.modules(): if hasattr(module, 'fp16_weight'): del module.fp16_weight @@ -843,7 +845,6 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False): sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer) if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: - apply_alpha_schedule_override(sd_model) return sd_model if sd_model is not None: From ee470cc6a32ae0c89ca32d71adac02b2d434f59a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:54:11 +0300 Subject: [PATCH 54/69] style changes for #14979 --- modules/sd_models.py | 60 ++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index db72e120f..747fc39ee 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -552,36 +552,48 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) + +def rescale_zero_terminal_snr_abar(alphas_cumprod): + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt ** 2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return alphas_bar + + def apply_alpha_schedule_override(sd_model, p=None): - def rescale_zero_terminal_snr_abar(alphas_cumprod): - alphas_bar_sqrt = alphas_cumprod.sqrt() + """ + Applies an override to the alpha schedule of the model according to settings. + - downcasts the alpha schedule to half precision + - rescales the alpha schedule to have zero terminal SNR + """ - # Store old values. - alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() - alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + if not hasattr(sd_model, 'alphas_cumprod') or not hasattr(sd_model, 'alphas_cumprod_original'): + return - # Shift so the last timestep is zero. 
- alphas_bar_sqrt -= (alphas_bar_sqrt_T) + sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) - # Scale so the first timestep is back to the old value. - alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + if opts.use_downcasted_alpha_bar: + if p is not None: + p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar + sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) - # Convert alphas_bar_sqrt to betas - alphas_bar = alphas_bar_sqrt**2 # Revert sqrt - alphas_bar[-1] = 4.8973451890853435e-08 - return alphas_bar + if opts.sd_noise_schedule == "Zero Terminal SNR": + if p is not None: + p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule + sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) - if hasattr(sd_model, 'alphas_cumprod') and hasattr(sd_model, 'alphas_cumprod_original'): - sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) - - if opts.use_downcasted_alpha_bar: - if p is not None: - p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar - sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) - if opts.sd_noise_schedule == "Zero Terminal SNR": - if p is not None: - p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule - sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' From bb24c13ed7910e9e6255e3d7ff3d81ba40468fc0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 07:39:59 +0300 Subject: [PATCH 55/69] infotext support for #14978 --- modules/infotext_utils.py | 3 +++ modules/infotext_versions.py | 3 +++ modules/sd_samplers_common.py | 8 +++++--- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index a938aa2a7..e04a7bee9 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -359,6 +359,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Emphasis" not in res: res["Emphasis"] = "Original" + if "Refiner switch by sampling steps" not in res: + res["Refiner switch by sampling steps"] = False + infotext_versions.backcompat(res) for key in skip_fields: diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py index 23b45c3f9..b5552a312 100644 --- a/modules/infotext_versions.py +++ b/modules/infotext_versions.py @@ -5,6 +5,7 @@ import re v160 = version.parse("1.6.0") v170_tsnr = version.parse("v1.7.0-225") +v180 = version.parse("1.8.0") def parse_version(text): @@ -40,3 +41,5 @@ def backcompat(d): if ver < v170_tsnr: d["Downcast alphas_cumprod"] = True + if ver < v180 and d.get('Refiner'): + d["Refiner switch by sampling steps"] = True diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 045b9e2fe..6df423912 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -155,14 +155,16 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() -def apply_refiner(cfg_denoiser, sigma): - if opts.refiner_switch_by_sample_steps: +def apply_refiner(cfg_denoiser, sigma=None): + if opts.refiner_switch_by_sample_steps or not sigma: completed_ratio = cfg_denoiser.step / 
cfg_denoiser.total_steps + cfg_denoiser.p.extra_generation_params["Refiner switch by sampling steps"] = True + else: # torch.max(sigma) only to handle rare case where we might have different sigmas in the same batch try: timestep = torch.argmin(torch.abs(cfg_denoiser.inner_model.sigmas - torch.max(sigma))) - except AttributeError: # for samplers that dont use sigmas (DDIM) sigma is actually the timestep + except AttributeError: # for samplers that don't use sigmas (DDIM) sigma is actually the timestep timestep = torch.max(sigma).to(dtype=int) completed_ratio = (999 - timestep) / 1000 From 45b8a499a7e6d732b1711a0016c211f2b3c19232 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 10:36:48 +0300 Subject: [PATCH 56/69] fix wrong condition --- modules/sd_samplers_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 6df423912..bda578cc5 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -156,7 +156,7 @@ replace_torchsde_browinan() def apply_refiner(cfg_denoiser, sigma=None): - if opts.refiner_switch_by_sample_steps or not sigma: + if opts.refiner_switch_by_sample_steps or sigma is None: completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps cfg_denoiser.p.extra_generation_params["Refiner switch by sampling steps"] = True From 01033656975cd0622aa3711352101751dfa6b1c3 Mon Sep 17 00:00:00 2001 From: Andray Date: Sun, 3 Mar 2024 16:54:58 +0400 Subject: [PATCH 57/69] fix_jpeg_live_preview --- modules/shared_state.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/shared_state.py b/modules/shared_state.py index 33996691c..759a47481 100644 --- a/modules/shared_state.py +++ b/modules/shared_state.py @@ -162,5 +162,7 @@ class State: errors.record_exception() def assign_current_image(self, image): + if shared.opts.live_previews_image_format == 'jpeg' and image.mode == 'RGBA': + image = image.convert('RGB') self.current_image = image self.id_live_preview += 1 From 3c0177a24b496be5b643b76348afe6a5ff30a59f Mon Sep 17 00:00:00 2001 From: Christopher Layne Date: Sat, 2 Mar 2024 08:00:20 -0800 Subject: [PATCH 58/69] upscaler_utils: Reduce logging * upscale_with_model: Remove debugging logging occurring in loop as it's an excessive amount of noise when running w/ DEBUG log levels. 
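Regarding the live-preview fix just above: Pillow refuses to encode an RGBA image as JPEG, so
RGBA previews would fail to serialize whenever the JPEG live-preview format is selected; dropping
the alpha channel first avoids that. A minimal reproduction (the file name is arbitrary):

from PIL import Image

img = Image.new("RGBA", (8, 8), (255, 0, 0, 128))
try:
    img.save("preview.jpg")                 # raises OSError: cannot write mode RGBA as JPEG
except OSError:
    img.convert("RGB").save("preview.jpg")  # fine once the alpha channel is discarded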
--- modules/upscaler_utils.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/upscaler_utils.py b/modules/upscaler_utils.py index b5e5a80ca..17223ca0d 100644 --- a/modules/upscaler_utils.py +++ b/modules/upscaler_utils.py @@ -69,10 +69,8 @@ def upscale_with_model( for y, h, row in grid.tiles: newrow = [] for x, w, tile in row: - logger.debug("Tile (%d, %d) %s...", x, y, tile) output = upscale_pil_patch(model, tile) scale_factor = output.width // tile.width - logger.debug("=> %s (scale factor %s)", output, scale_factor) newrow.append([x * scale_factor, w * scale_factor, output]) p.update(1) newtiles.append([y * scale_factor, h * scale_factor, newrow]) From e3fa46f26f78c01969eaca2708be4e2b4928c5a2 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Mon, 4 Mar 2024 08:37:23 +0200 Subject: [PATCH 59/69] Fix various typos with crate-ci/typos --- CHANGELOG.md | 18 +++++++++--------- _typos.toml | 5 +++++ extensions-builtin/LDSR/sd_hijack_ddpm_v1.py | 8 ++++---- extensions-builtin/Lora/lyco_helpers.py | 2 +- extensions-builtin/Lora/networks.py | 2 +- .../canvas-zoom-and-pan/javascript/zoom.js | 4 ++-- .../scripts/hotkey_config.py | 4 ++-- .../soft-inpainting/scripts/soft_inpainting.py | 2 +- javascript/aspectRatioOverlay.js | 12 ++++++------ javascript/extraNetworks.js | 2 +- javascript/ui.js | 2 +- modules/api/api.py | 6 +++--- modules/call_queue.py | 4 ++-- modules/devices.py | 2 +- modules/extra_networks.py | 2 +- modules/initialize.py | 2 +- modules/mac_specific.py | 2 +- modules/modelloader.py | 6 +++--- modules/models/diffusion/ddpm_edit.py | 8 ++++---- modules/rng.py | 4 ++-- modules/scripts.py | 4 ++-- modules/sd_emphasis.py | 4 ++-- modules/sd_hijack_clip.py | 8 ++++---- modules/sd_models.py | 2 +- modules/shared.py | 2 +- modules/shared_options.py | 4 ++-- modules/shared_state.py | 2 +- modules/textual_inversion/autocrop.py | 4 ++-- modules/textual_inversion/image_embedding.py | 6 +++--- modules/textual_inversion/textual_inversion.py | 2 +- modules/ui_common.py | 2 +- modules/ui_components.py | 2 +- modules/ui_extensions.py | 2 +- modules/ui_prompt_styles.py | 2 +- scripts/outpainting_mk_2.py | 2 +- scripts/xyz_grid.py | 2 +- 36 files changed, 76 insertions(+), 71 deletions(-) create mode 100644 _typos.toml diff --git a/CHANGELOG.md b/CHANGELOG.md index f0c659811..0df47801b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ * Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039)) * Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900)) * NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801)) -* Propmpt comments support +* Prompt comments support ### Minor: * Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296)) @@ -59,7 +59,7 @@ * modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715)) * set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773)) * add before_token_counter callback and use it for prompt comments -* ResizeHandleRow - allow overriden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004)) +* 
ResizeHandleRow - allow overridden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004)) ### Performance * Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528)) @@ -101,7 +101,7 @@ * Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933)) * Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932)) * Disable prompt token counters option actually disables token counting rather than just hiding results. -* avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966)) +* avoid double upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966)) * Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995)) * fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006)) * Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065)) @@ -171,7 +171,7 @@ * infotext updates: add option to disregard certain infotext fields, add option to not include VAE in infotext, add explanation to infotext settings page, move some options to infotext settings page * add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046)) * support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126)) -* allow use of mutiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125)) +* allow use of multiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125)) * make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241)) ### Extensions and API: @@ -308,7 +308,7 @@ * new samplers: Restart, DPM++ 2M SDE Exponential, DPM++ 2M SDE Heun, DPM++ 2M SDE Heun Karras, DPM++ 2M SDE Heun Exponential, DPM++ 3M SDE, DPM++ 3M SDE Karras, DPM++ 3M SDE Exponential ([#12300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12300), [#12519](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12519), [#12542](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12542)) * rework DDIM, PLMS, UniPC to use CFG denoiser same as in k-diffusion samplers: * makes all of them work with img2img - * makes prompt composition posssible (AND) + * makes prompt composition possible (AND) * makes them available for SDXL * always show extra networks tabs in the UI ([#11808](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11808)) * use less RAM when creating models ([#11958](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/11958), [#12599](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12599)) @@ -484,7 +484,7 @@ * user metadata system for custom networks * extended Lora metadata editor: 
set activation text, default weight, view tags, training info * Lora extension rework to include other types of networks (all that were previously handled by LyCORIS extension) - * show github stars for extenstions + * show github stars for extensions * img2img batch mode can read extra stuff from png info * img2img batch works with subdirectories * hotkeys to move prompt elements: alt+left/right @@ -703,7 +703,7 @@ * do not wait for Stable Diffusion model to load at startup * add filename patterns: `[denoising]` * directory hiding for extra networks: dirs starting with `.` will hide their cards on extra network tabs unless specifically searched for - * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metdata of the file, if present, instead of filename (both can be used to activate LoRA) + * LoRA: for the `<...>` text in prompt, use name of LoRA that is in the metadata of the file, if present, instead of filename (both can be used to activate LoRA) * LoRA: read infotext params from kohya-ss's extension parameters if they are present and if his extension is not active * LoRA: fix some LoRAs not working (ones that have 3x3 convolution layer) * LoRA: add an option to use old method of applying LoRAs (producing same results as with kohya-ss) @@ -733,7 +733,7 @@ * fix gamepad navigation * make the lightbox fullscreen image function properly * fix squished thumbnails in extras tab - * keep "search" filter for extra networks when user refreshes the tab (previously it showed everthing after you refreshed) + * keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed) * fix webui showing the same image if you configure the generation to always save results into same file * fix bug with upscalers not working properly * fix MPS on PyTorch 2.0.1, Intel Macs @@ -751,7 +751,7 @@ * switch to PyTorch 2.0.0 (except for AMD GPUs) * visual improvements to custom code scripts * add filename patterns: `[clip_skip]`, `[hasprompt<>]`, `[batch_number]`, `[generation_number]` - * add support for saving init images in img2img, and record their hashes in infotext for reproducability + * add support for saving init images in img2img, and record their hashes in infotext for reproducibility * automatically select current word when adjusting weight with ctrl+up/down * add dropdowns for X/Y/Z plot * add setting: Stable Diffusion/Random number generator source: makes it possible to make images generated from a given manual seed consistent across different GPUs diff --git a/_typos.toml b/_typos.toml new file mode 100644 index 000000000..1c63fe703 --- /dev/null +++ b/_typos.toml @@ -0,0 +1,5 @@ +[default.extend-words] +# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode) +Ba = "Ba" +# HSA is something AMD uses for their GPUs +HSA = "HSA" diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 04adc5eb2..9a1e0778f 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -301,7 +301,7 @@ class DDPMV1(pl.LightningModule): elif self.parameterization == "x0": target = x_start else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") + raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) @@ -880,7 +880,7 @@ class LatentDiffusionV1(DDPMV1): def apply_model(self, x_noisy, t, cond, 
return_ids=False): if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict + # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): @@ -916,7 +916,7 @@ class LatentDiffusionV1(DDPMV1): cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' + assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) @@ -926,7 +926,7 @@ class LatentDiffusionV1(DDPMV1): num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we + # get top left positions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 1679a0ce6..6f134d54e 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -30,7 +30,7 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: In LoRA with Kroneckor Product, first value is a value for weight scale. secon value is a value for weight. - Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different. + Because of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different. examples) factor diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 83ea2802b..04bd19117 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -355,7 +355,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn """ Applies the currently selected set of networks to the weights of torch layer self. If weights already have this particular set of networks applied, does nothing. - If not, restores orginal weights from backup and alters weights according to networks. + If not, restores original weights from backup and alters weights according to networks. 
""" network_layer_name = getattr(self, 'network_layer_name', None) diff --git a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js index df60c1a17..64e7a638a 100644 --- a/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js +++ b/extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js @@ -292,7 +292,7 @@ onUiLoaded(async() => { // Create tooltip function createTooltip() { - const toolTipElemnt = + const toolTipElement = targetElement.querySelector(".image-container"); const tooltip = document.createElement("div"); tooltip.className = "canvas-tooltip"; @@ -355,7 +355,7 @@ onUiLoaded(async() => { tooltip.appendChild(tooltipContent); // Add a hint element to the target element - toolTipElemnt.appendChild(tooltip); + toolTipElement.appendChild(tooltip); } //Show tool tip if setting enable diff --git a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py index 89b7c31f2..17b27b274 100644 --- a/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py +++ b/extensions-builtin/canvas-zoom-and-pan/scripts/hotkey_config.py @@ -8,8 +8,8 @@ shared.options_templates.update(shared.options_section(('canvas_hotkey', "Canvas "canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"), "canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"), "canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "), - "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"), - "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"), + "canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"), + "canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"), "canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"), "canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"), "canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"), diff --git a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py index d90243442..d4cf3fda3 100644 --- a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py +++ b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py @@ -104,7 +104,7 @@ def latent_blend(settings, a, b, t): def get_modified_nmask(settings, nmask, sigma): """ - Converts a negative mask representing the transparency of the original latent vectors being overlayed + Converts a negative mask representing the transparency of the original latent vectors being overlaid to a mask that is scaled according to the denoising strength for this step. 
Where: diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js index 2cf2d571f..c8751fe49 100644 --- a/javascript/aspectRatioOverlay.js +++ b/javascript/aspectRatioOverlay.js @@ -50,17 +50,17 @@ function dimensionChange(e, is_width, is_height) { var scaledx = targetElement.naturalWidth * viewportscale; var scaledy = targetElement.naturalHeight * viewportscale; - var cleintRectTop = (viewportOffset.top + window.scrollY); - var cleintRectLeft = (viewportOffset.left + window.scrollX); - var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2); - var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2); + var clientRectTop = (viewportOffset.top + window.scrollY); + var clientRectLeft = (viewportOffset.left + window.scrollX); + var clientRectCentreY = clientRectTop + (targetElement.clientHeight / 2); + var clientRectCentreX = clientRectLeft + (targetElement.clientWidth / 2); var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight); var arscaledx = currentWidth * arscale; var arscaledy = currentHeight * arscale; - var arRectTop = cleintRectCentreY - (arscaledy / 2); - var arRectLeft = cleintRectCentreX - (arscaledx / 2); + var arRectTop = clientRectCentreY - (arscaledy / 2); + var arRectLeft = clientRectCentreX - (arscaledx / 2); var arRectWidth = arscaledx; var arRectHeight = arscaledy; diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 1610698bf..c21433db5 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -290,7 +290,7 @@ function extraNetworksTreeProcessDirectoryClick(event, btn, tabname, extra_netwo * Processes `onclick` events when user clicks on directories in tree. * * Here is how the tree reacts to clicks for various states: - * unselected unopened directory: Diretory is selected and expanded. + * unselected unopened directory: Directory is selected and expanded. * unselected opened directory: Directory is selected. * selected opened directory: Directory is collapsed and deselected. * chevron is clicked: Directory is expanded or collapsed. Selected state unchanged. diff --git a/javascript/ui.js b/javascript/ui.js index 3d079b3df..1eef6d337 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -411,7 +411,7 @@ function switchWidthHeight(tabname) { var onEditTimers = {}; -// calls func after afterMs milliseconds has passed since the input elem has beed enited by user +// calls func after afterMs milliseconds has passed since the input elem has been edited by user function onEdit(editId, elem, afterMs, func) { var edited = function() { var existingTimer = onEditTimers[editId]; diff --git a/modules/api/api.py b/modules/api/api.py index 4e6560826..78ff70df7 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -360,7 +360,7 @@ class Api: return script_args def apply_infotext(self, request, tabname, *, script_runner=None, mentioned_script_args=None): - """Processes `infotext` field from the `request`, and sets other fields of the `request` accoring to what's in infotext. + """Processes `infotext` field from the `request`, and sets other fields of the `request` according to what's in infotext. If request already has a field set, and that field is encountered in infotext too, the value from infotext is ignored. 
@@ -409,8 +409,8 @@ class Api: if request.override_settings is None: request.override_settings = {} - overriden_settings = infotext_utils.get_override_settings(params) - for _, setting_name, value in overriden_settings: + overridden_settings = infotext_utils.get_override_settings(params) + for _, setting_name, value in overridden_settings: if setting_name not in request.override_settings: request.override_settings[setting_name] = value diff --git a/modules/call_queue.py b/modules/call_queue.py index bcd7c5462..b50931bcd 100644 --- a/modules/call_queue.py +++ b/modules/call_queue.py @@ -100,8 +100,8 @@ def wrap_gradio_call(func, extra_outputs=None, add_stats=False): sys_pct = sys_peak/max(sys_total, 1) * 100 toltip_a = "Active: peak amount of video memory used during generation (excluding cached data)" - toltip_r = "Reserved: total amout of video memory allocated by the Torch library " - toltip_sys = "System: peak amout of video memory allocated by all running programs, out of total capacity" + toltip_r = "Reserved: total amount of video memory allocated by the Torch library " + toltip_sys = "System: peak amount of video memory allocated by all running programs, out of total capacity" text_a = f"A: {active_peak/1024:.2f} GB" text_r = f"R: {reserved_peak/1024:.2f} GB" diff --git a/modules/devices.py b/modules/devices.py index 28c0c54d8..e4f671ac6 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -259,7 +259,7 @@ def test_for_nans(x, where): def first_time_calculation(): """ just do any calculation with pytorch layers - the first time this is done it allocaltes about 700MB of memory and - spends about 2.7 seconds doing that, at least wih NVidia. + spends about 2.7 seconds doing that, at least with NVidia. """ x = torch.zeros((1, 1)).to(device, dtype) diff --git a/modules/extra_networks.py b/modules/extra_networks.py index 04249dffd..ae8d42d9b 100644 --- a/modules/extra_networks.py +++ b/modules/extra_networks.py @@ -60,7 +60,7 @@ class ExtraNetwork: Where name matches the name of this ExtraNetwork object, and arg1:arg2:arg3 are any natural number of text arguments separated by colon. - Even if the user does not mention this ExtraNetwork in his prompt, the call will stil be made, with empty params_list - + Even if the user does not mention this ExtraNetwork in his prompt, the call will still be made, with empty params_list - in this case, all effects of this extra networks should be disabled. Can be called multiple times before deactivate() - each new call should override the previous call completely. diff --git a/modules/initialize.py b/modules/initialize.py index f7313ff4d..08ad4c0b0 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -139,7 +139,7 @@ def initialize_rest(*, reload_script_modules=False): """ Accesses shared.sd_model property to load model. After it's available, if it has been loaded before this access by some extension, - its optimization may be None because the list of optimizaers has neet been filled + its optimization may be None because the list of optimizers has not been filled by that time, so we apply optimization again. """ from modules import devices diff --git a/modules/mac_specific.py b/modules/mac_specific.py index d96d86d79..039689f32 100644 --- a/modules/mac_specific.py +++ b/modules/mac_specific.py @@ -12,7 +12,7 @@ log = logging.getLogger(__name__) # before torch version 1.13, has_mps is only available in nightly pytorch and macOS 12.3+, # use check `getattr` and try it for compatibility. 
-# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availabilty, +# in torch version 1.13, backends.mps.is_available() and backends.mps.is_built() are introduced in to check mps availability, # since torch 2.0.1+ nightly build, getattr(torch, 'has_mps', False) was deprecated, see https://github.com/pytorch/pytorch/pull/103279 def check_for_mps() -> bool: if version.parse(torch.__version__) <= version.parse("2.0.1"): diff --git a/modules/modelloader.py b/modules/modelloader.py index e100bb246..115415c8e 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -110,7 +110,7 @@ def load_upscalers(): except Exception: pass - datas = [] + data = [] commandline_options = vars(shared.cmd_opts) # some of upscaler classes will not go away after reloading their modules, and we'll end @@ -129,10 +129,10 @@ def load_upscalers(): scaler = cls(commandline_model_path) scaler.user_path = commandline_model_path scaler.model_download_path = commandline_model_path or scaler.model_path - datas += scaler.scalers + data += scaler.scalers shared.sd_upscalers = sorted( - datas, + data, # Special case for UpscalerNone keeps it at the beginning of the list. key=lambda x: x.name.lower() if not isinstance(x.scaler, (UpscalerNone, UpscalerLanczos, UpscalerNearest)) else "" ) diff --git a/modules/models/diffusion/ddpm_edit.py b/modules/models/diffusion/ddpm_edit.py index 6db340da4..7b51c83c5 100644 --- a/modules/models/diffusion/ddpm_edit.py +++ b/modules/models/diffusion/ddpm_edit.py @@ -341,7 +341,7 @@ class DDPM(pl.LightningModule): elif self.parameterization == "x0": target = x_start else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") + raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) @@ -901,7 +901,7 @@ class LatentDiffusion(DDPM): def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict + # hybrid case, cond is expected to be a dict pass else: if not isinstance(cond, list): @@ -937,7 +937,7 @@ class LatentDiffusion(DDPM): cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' + assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) @@ -947,7 +947,7 @@ class LatentDiffusion(DDPM): num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we + # get top left positions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) diff --git a/modules/rng.py b/modules/rng.py index 8934d39bf..5390d1bb7 100644 --- a/modules/rng.py +++ b/modules/rng.py @@ -34,7 +34,7 @@ def randn_local(seed, shape): def randn_like(x): - """Generate a tensor with random numbers from a normal distribution using the previously 
initialized genrator. + """Generate a tensor with random numbers from a normal distribution using the previously initialized generator. Use either randn() or manual_seed() to initialize the generator.""" @@ -48,7 +48,7 @@ def randn_like(x): def randn_without_seed(shape, generator=None): - """Generate a tensor with random numbers from a normal distribution using the previously initialized genrator. + """Generate a tensor with random numbers from a normal distribution using the previously initialized generator. Use either randn() or manual_seed() to initialize the generator.""" diff --git a/modules/scripts.py b/modules/scripts.py index 94690a22f..77f5e4f3e 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -92,7 +92,7 @@ class Script: """If true, the script setup will only be run in Gradio UI, not in API""" controls = None - """A list of controls retured by the ui().""" + """A list of controls returned by the ui().""" def title(self): """this function should return the title of the script. This is what will be displayed in the dropdown menu.""" @@ -109,7 +109,7 @@ class Script: def show(self, is_img2img): """ - is_img2img is True if this function is called for the img2img interface, and Fasle otherwise + is_img2img is True if this function is called for the img2img interface, and False otherwise This function should return: - False if the script should not be shown in UI at all diff --git a/modules/sd_emphasis.py b/modules/sd_emphasis.py index 654817b60..49ef1a6ac 100644 --- a/modules/sd_emphasis.py +++ b/modules/sd_emphasis.py @@ -35,7 +35,7 @@ class EmphasisIgnore(Emphasis): class EmphasisOriginal(Emphasis): name = "Original" - description = "the orginal emphasis implementation" + description = "the original emphasis implementation" def after_transformers(self): original_mean = self.z.mean() @@ -48,7 +48,7 @@ class EmphasisOriginal(Emphasis): class EmphasisOriginalNoNorm(EmphasisOriginal): name = "No norm" - description = "same as orginal, but without normalization (seems to work better for SDXL)" + description = "same as original, but without normalization (seems to work better for SDXL)" def after_transformers(self): self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 98350ac43..81c60f485 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -23,7 +23,7 @@ class PromptChunk: PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) """An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt -chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally +chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" @@ -66,7 +66,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): def encode_with_transformers(self, tokens): """ - converts a batch of token ids (in python lists) into a single tensor with numeric respresentation of those tokens; + converts a batch of token ids (in python lists) into a single tensor with numeric representation of those tokens; All python lists with tokens are assumed to have same length, usually 77. 
if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on model - can be 768 and 1024. @@ -136,7 +136,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): if token == self.comma_token: last_comma = len(chunk.tokens) - # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack + # this is when we are at the end of allotted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: break_location = last_comma + 1 @@ -206,7 +206,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, for SD2 it's 1024, and for SDXL it's 1280. An example shape returned by this function can be: (2, 77, 768). For SDXL, instead of returning one tensor avobe, it returns a tuple with two: the other one with shape (B, 1280) with pooled values. - Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one elemenet + Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one element is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" """ diff --git a/modules/sd_models.py b/modules/sd_models.py index 747fc39ee..b35aecbca 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -784,7 +784,7 @@ def reuse_model_from_already_loaded(sd_model, checkpoint_info, timer): If it is loaded, returns that (moving it to GPU if necessary, and moving the currently loadded model to CPU if necessary). If not, returns the model that can be used to load weights from checkpoint_info's file. If no such model exists, returns None. - Additionaly deletes loaded models that are over the limit set in settings (sd_checkpoints_limit). + Additionally deletes loaded models that are over the limit set in settings (sd_checkpoints_limit). 
""" already_loaded = None diff --git a/modules/shared.py b/modules/shared.py index ccdca4e70..b4ba14ad7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -43,7 +43,7 @@ restricted_opts = None sd_model: sd_models_types.WebuiSdModel = None settings_components = None -"""assinged from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" +"""assigned from ui.py, a mapping on setting names to gradio components repsponsible for those settings""" tab_names = [] diff --git a/modules/shared_options.py b/modules/shared_options.py index 073454c6a..536766dbe 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -213,7 +213,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd" "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; overrides the above if set; WARNING: truncates negative prompt if it's too long; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), - "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), + "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond commandline argument"), "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."), "cache_fp16_weight": OptionInfo(False, "Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. 
Use more system ram."), })) @@ -370,7 +370,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 'rho': OptionInfo(0.0, "rho", gr.Number, infotext='Schedule rho').info("0 = default (7 for karras, 1 for polyexponential); higher values result in a steeper noise schedule (decreases faster)"), 'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}, infotext='ENSD').info("ENSD; does not improve anything, just produces different results for ancestral samplers - only useful for reproducing images"), 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma", infotext='Discard penultimate sigma').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/6044"), - 'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"), + 'sgm_noise_multiplier': OptionInfo(False, "SGM noise multiplier", infotext='SGM noise multiplier').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12818").info("Match initial noise to official SDXL implementation - only useful for reproducing images"), 'uni_pc_variant': OptionInfo("bh1", "UniPC variant", gr.Radio, {"choices": ["bh1", "bh2", "vary_coeff"]}, infotext='UniPC variant'), 'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'), 'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"), diff --git a/modules/shared_state.py b/modules/shared_state.py index 33996691c..db20b7639 100644 --- a/modules/shared_state.py +++ b/modules/shared_state.py @@ -157,7 +157,7 @@ class State: self.current_image_sampling_step = self.sampling_step except Exception: - # when switching models during genration, VAE would be on CPU, so creating an image will fail. + # when switching models during generation, VAE would be on CPU, so creating an image will fail. 
# we silently ignore this error errors.record_exception() diff --git a/modules/textual_inversion/autocrop.py b/modules/textual_inversion/autocrop.py index e223a2e0c..ca858ef4c 100644 --- a/modules/textual_inversion/autocrop.py +++ b/modules/textual_inversion/autocrop.py @@ -65,7 +65,7 @@ def crop_image(im, settings): rect[3] -= 1 d.rectangle(rect, outline=GREEN) results.append(im_debug) - if settings.destop_view_image: + if settings.desktop_view_image: im_debug.show() return results @@ -341,5 +341,5 @@ class Settings: self.entropy_points_weight = entropy_points_weight self.face_points_weight = face_points_weight self.annotate_image = annotate_image - self.destop_view_image = False + self.desktop_view_image = False self.dnn_model_path = dnn_model_path diff --git a/modules/textual_inversion/image_embedding.py b/modules/textual_inversion/image_embedding.py index 81cff7bf1..ea4b88333 100644 --- a/modules/textual_inversion/image_embedding.py +++ b/modules/textual_inversion/image_embedding.py @@ -193,11 +193,11 @@ if __name__ == '__main__': embedded_image = insert_image_data_embed(cap_image, test_embed) - retrived_embed = extract_image_data_embed(embedded_image) + retrieved_embed = extract_image_data_embed(embedded_image) - assert str(retrived_embed) == str(test_embed) + assert str(retrieved_embed) == str(test_embed) - embedded_image2 = insert_image_data_embed(cap_image, retrived_embed) + embedded_image2 = insert_image_data_embed(cap_image, retrieved_embed) assert embedded_image == embedded_image2 diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 6d815c0b3..c206ef5fd 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -172,7 +172,7 @@ class EmbeddingDatabase: if data: name = data.get('name', name) else: - # if data is None, means this is not an embeding, just a preview image + # if data is None, means this is not an embedding, just a preview image return elif ext in ['.BIN', '.PT']: data = torch.load(path, map_location="cpu") diff --git a/modules/ui_common.py b/modules/ui_common.py index cf1b8b32c..31b5492ea 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -105,7 +105,7 @@ def save_files(js_data, images, do_make_zip, index): logfile_path = os.path.join(shared.opts.outdir_save, "log.csv") # NOTE: ensure csv integrity when fields are added by - # updating headers and padding with delimeters where needed + # updating headers and padding with delimiters where needed if os.path.exists(logfile_path): update_logfile(logfile_path, fields) diff --git a/modules/ui_components.py b/modules/ui_components.py index 55979f626..9cf67722a 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -88,7 +88,7 @@ class DropdownEditable(FormComponent, gr.Dropdown): class InputAccordion(gr.Checkbox): """A gr.Accordion that can be used as an input - returns True if open, False if closed. - Actaully just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox. + Actually just a hidden checkbox, but creates an accordion that follows and is followed by the state of the checkbox. 
""" global_index = 0 diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index a24ea32ef..913e1444e 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -380,7 +380,7 @@ def install_extension_from_url(dirname, url, branch_name=None): except OSError as err: if err.errno == errno.EXDEV: # Cross device link, typical in docker or when tmp/ and extensions/ are on different file systems - # Since we can't use a rename, do the slower but more versitile shutil.move() + # Since we can't use a rename, do the slower but more versatile shutil.move() shutil.move(tmpdir, target_dir) else: # Something else, not enough free space, permissions, etc. rethrow it so that it gets handled. diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py index d67e3f17e..f71b40c41 100644 --- a/modules/ui_prompt_styles.py +++ b/modules/ui_prompt_styles.py @@ -67,7 +67,7 @@ class UiPromptStyles: with gr.Row(): self.selection = gr.Dropdown(label="Styles", elem_id=f"{tabname}_styles_edit_select", choices=list(shared.prompt_styles.styles), value=[], allow_custom_value=True, info="Styles allow you to add custom text to prompt. Use the {prompt} token in style text, and it will be replaced with user's prompt when applying style. Otherwise, style's text will be added to the end of the prompt.") ui_common.create_refresh_button([self.dropdown, self.selection], shared.prompt_styles.reload, lambda: {"choices": list(shared.prompt_styles.styles)}, f"refresh_{tabname}_styles") - self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selction dropdown in main UI to the prompt.") + self.materialize = ui_components.ToolButton(value=styles_materialize_symbol, elem_id=f"{tabname}_style_apply_dialog", tooltip="Apply all selected styles from the style selection dropdown in main UI to the prompt.") self.copy = ui_components.ToolButton(value=styles_copy_symbol, elem_id=f"{tabname}_style_copy", tooltip="Copy main UI prompt to style.") with gr.Row(): diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index c98ab4809..5df9dff9c 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -102,7 +102,7 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0 shaped_noise_fft = _fft2(noise_rgb) shaped_noise_fft[:, :, :] = np.absolute(shaped_noise_fft[:, :, :]) ** 2 * (src_dist ** noise_q) * src_phase # perform the actual shaping - brightness_variation = 0. # color_variation # todo: temporarily tieing brightness variation to color variation for now + brightness_variation = 0. # color_variation # todo: temporarily tying brightness variation to color variation for now contrast_adjusted_np_src = _np_src_image[:] * (brightness_variation + 1.) - brightness_variation * 2. # scikit-image is used for histogram matching, very convenient! 
diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 6d3e42c06..57ee47088 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -45,7 +45,7 @@ def apply_prompt(p, x, xs): def apply_order(p, x, xs): token_order = [] - # Initally grab the tokens from the prompt, so they can be replaced in order of earliest seen + # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen for token in x: token_order.append((p.prompt.find(token), token)) From 3fb1c2e58d30ea378c49f7d0e10df916cef1473e Mon Sep 17 00:00:00 2001 From: wangshuai09 <391746016@qq.com> Date: Mon, 4 Mar 2024 17:19:37 +0800 Subject: [PATCH 60/69] fix npu-smi command --- webui.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.sh b/webui.sh index be2b853b0..eb76bcab1 100755 --- a/webui.sh +++ b/webui.sh @@ -158,7 +158,7 @@ then if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch==2.0.1+rocm5.4.2 torchvision==0.15.2+rocm5.4.2 --index-url https://download.pytorch.org/whl/rocm5.4.2" - elif eval "npu-smi info" + elif npu-smi info 2>/dev/null then export TORCH_COMMAND="pip install torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu; pip install torch_npu==2.1.0" From 67d8dafe4474c8f63889630fe61075e2ad507085 Mon Sep 17 00:00:00 2001 From: Alon Burg Date: Thu, 29 Feb 2024 10:07:15 +0200 Subject: [PATCH 61/69] Fix EXIF orientation in API image loading --- modules/api/api.py | 2 ++ modules/images.py | 49 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/modules/api/api.py b/modules/api/api.py index 4e6560826..5742e6e6e 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -86,6 +86,7 @@ def decode_base64_to_image(encoding): response = requests.get(encoding, timeout=30, headers=headers) try: image = Image.open(BytesIO(response.content)) + image = images.apply_exif_orientation(image) return image except Exception as e: raise HTTPException(status_code=500, detail="Invalid image url") from e @@ -94,6 +95,7 @@ def decode_base64_to_image(encoding): encoding = encoding.split(";")[1].split(",")[1] try: image = Image.open(BytesIO(base64.b64decode(encoding))) + image = images.apply_exif_orientation(image) return image except Exception as e: raise HTTPException(status_code=500, detail="Invalid encoded image") from e diff --git a/modules/images.py b/modules/images.py index b6f2358c3..1728ebc3e 100644 --- a/modules/images.py +++ b/modules/images.py @@ -797,3 +797,52 @@ def flatten(img, bgcolor): return img.convert('RGB') + +# https://www.exiv2.org/tags.html +_EXIF_ORIENT = 274 # exif 'Orientation' tag + +def apply_exif_orientation(image): + """ + Applies the exif orientation correctly. + + This code exists per the bug: + https://github.com/python-pillow/Pillow/issues/3973 + with the function `ImageOps.exif_transpose`. 
The Pillow source raises errors with + various methods, especially `tobytes` + + Function based on: + https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59 + https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527 + + Args: + image (PIL.Image): a PIL image + + Returns: + (PIL.Image): the PIL image with exif orientation applied, if applicable + """ + if not hasattr(image, "getexif"): + return image + + try: + exif = image.getexif() + except Exception: # https://github.com/facebookresearch/detectron2/issues/1885 + exif = None + + if exif is None: + return image + + orientation = exif.get(_EXIF_ORIENT) + + method = { + 2: Image.FLIP_LEFT_RIGHT, + 3: Image.ROTATE_180, + 4: Image.FLIP_TOP_BOTTOM, + 5: Image.TRANSPOSE, + 6: Image.ROTATE_270, + 7: Image.TRANSVERSE, + 8: Image.ROTATE_90, + }.get(orientation) + + if method is not None: + return image.transpose(method) + return image From 0dc12861efee9d1e1eacb2d1903bf0fcd43fcfcc Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Mar 2024 15:30:46 +0300 Subject: [PATCH 62/69] call script_callbacks.ui_settings_callback earlier; fix extra-options-section built-in extension killing the ui if using a setting that doesn't exist --- .../scripts/extra_options_section.py | 8 ++++++-- modules/ui.py | 4 +++- modules/ui_settings.py | 4 +++- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/extensions-builtin/extra-options-section/scripts/extra_options_section.py b/extensions-builtin/extra-options-section/scripts/extra_options_section.py index 4c10d9c7d..a91bea4fa 100644 --- a/extensions-builtin/extra-options-section/scripts/extra_options_section.py +++ b/extensions-builtin/extra-options-section/scripts/extra_options_section.py @@ -1,7 +1,7 @@ import math import gradio as gr -from modules import scripts, shared, ui_components, ui_settings, infotext_utils +from modules import scripts, shared, ui_components, ui_settings, infotext_utils, errors from modules.ui_components import FormColumn @@ -42,7 +42,11 @@ class ExtraOptionsSection(scripts.Script): setting_name = extra_options[index] with FormColumn(): - comp = ui_settings.create_setting_component(setting_name) + try: + comp = ui_settings.create_setting_component(setting_name) + except KeyError: + errors.report(f"Can't add extra options for {setting_name} in ui") + continue self.comps.append(comp) self.setting_names.append(setting_name) diff --git a/modules/ui.py b/modules/ui.py index dcba8e885..7b4341627 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -269,6 +269,9 @@ def create_ui(): parameters_copypaste.reset() + settings = ui_settings.UiSettings() + settings.register_settings() + scripts.scripts_current = scripts.scripts_txt2img scripts.scripts_txt2img.initialize_scripts(is_img2img=False) @@ -1116,7 +1119,6 @@ def create_ui(): loadsave = ui_loadsave.UiLoadsave(cmd_opts.ui_config_file) ui_settings_from_file = loadsave.ui_settings.copy() - settings = ui_settings.UiSettings() settings.create_ui(loadsave, dummy_component) interfaces = [ diff --git a/modules/ui_settings.py b/modules/ui_settings.py index e054d00ab..d17ef1d95 100644 --- a/modules/ui_settings.py +++ b/modules/ui_settings.py @@ -98,6 +98,9 @@ class UiSettings: return get_value_for_setting(key), opts.dumpjson() + def register_settings(self): + script_callbacks.ui_settings_callback() + def create_ui(self, loadsave, dummy_component): self.components = [] self.component_dict = {} @@ -105,7 +108,6 @@ class UiSettings: shared.settings_components = self.component_dict - 
script_callbacks.ui_settings_callback() opts.reorder() with gr.Blocks(analytics_enabled=False) as settings_interface: From 09b5ce68a99da700dd5a63f4475b0ac2d2a959e2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 4 Mar 2024 19:14:53 +0300 Subject: [PATCH 63/69] add images.read to automatically fix all jpeg/png weirdness --- modules/api/api.py | 6 +-- modules/images.py | 66 ++++++++-------------------- modules/img2img.py | 25 +++++------ modules/infotext_utils.py | 6 +-- modules/postprocessing.py | 6 +-- modules/textual_inversion/dataset.py | 4 +- 6 files changed, 41 insertions(+), 72 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index a0e70329c..0630e77e8 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -85,8 +85,7 @@ def decode_base64_to_image(encoding): headers = {'user-agent': opts.api_useragent} if opts.api_useragent else {} response = requests.get(encoding, timeout=30, headers=headers) try: - image = Image.open(BytesIO(response.content)) - image = images.apply_exif_orientation(image) + image = images.read(BytesIO(response.content)) return image except Exception as e: raise HTTPException(status_code=500, detail="Invalid image url") from e @@ -94,8 +93,7 @@ def decode_base64_to_image(encoding): if encoding.startswith("data:image/"): encoding = encoding.split(";")[1].split(",")[1] try: - image = Image.open(BytesIO(base64.b64decode(encoding))) - image = images.apply_exif_orientation(image) + image = images.read(BytesIO(base64.b64decode(encoding))) return image except Exception as e: raise HTTPException(status_code=500, detail="Invalid encoded image") from e diff --git a/modules/images.py b/modules/images.py index de90b4033..c50b2455d 100644 --- a/modules/images.py +++ b/modules/images.py @@ -12,7 +12,7 @@ import re import numpy as np import piexif import piexif.helper -from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin +from PIL import Image, ImageFont, ImageDraw, ImageColor, PngImagePlugin, ImageOps import string import json import hashlib @@ -551,12 +551,6 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p else: pnginfo_data = None - # Error handling for unsupported transparency in RGB mode - if (image.mode == "RGB" and - "transparency" in image.info and - isinstance(image.info["transparency"], bytes)): - del image.info["transparency"] - image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) elif extension.lower() in (".jpg", ".jpeg", ".webp"): @@ -779,7 +773,7 @@ def image_data(data): import gradio as gr try: - image = Image.open(io.BytesIO(data)) + image = read(io.BytesIO(data)) textinfo, _ = read_info_from_image(image) return textinfo, None except Exception: @@ -807,51 +801,29 @@ def flatten(img, bgcolor): return img.convert('RGB') -# https://www.exiv2.org/tags.html -_EXIF_ORIENT = 274 # exif 'Orientation' tag +def read(fp, **kwargs): + image = Image.open(fp, **kwargs) + image = fix_image(image) -def apply_exif_orientation(image): - """ - Applies the exif orientation correctly. + return image - This code exists per the bug: - https://github.com/python-pillow/Pillow/issues/3973 - with the function `ImageOps.exif_transpose`. 
The Pillow source raises errors with - various methods, especially `tobytes` - Function based on: - https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59 - https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527 - - Args: - image (PIL.Image): a PIL image - - Returns: - (PIL.Image): the PIL image with exif orientation applied, if applicable - """ - if not hasattr(image, "getexif"): - return image +def fix_image(image: Image.Image): + if image is None: + return None try: - exif = image.getexif() - except Exception: # https://github.com/facebookresearch/detectron2/issues/1885 - exif = None + image = ImageOps.exif_transpose(image) + image = fix_png_transparency(image) + except Exception: + pass - if exif is None: + return image + + +def fix_png_transparency(image: Image.Image): + if image.mode not in ("RGB", "P") or not isinstance(image.info.get("transparency"), bytes): return image - orientation = exif.get(_EXIF_ORIENT) - - method = { - 2: Image.FLIP_LEFT_RIGHT, - 3: Image.ROTATE_180, - 4: Image.FLIP_TOP_BOTTOM, - 5: Image.TRANSPOSE, - 6: Image.ROTATE_270, - 7: Image.TRANSVERSE, - 8: Image.ROTATE_90, - }.get(orientation) - - if method is not None: - return image.transpose(method) + image = image.convert("RGBA") return image diff --git a/modules/img2img.py b/modules/img2img.py index f81405df5..e7fb3ea3c 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -6,7 +6,7 @@ import numpy as np from PIL import Image, ImageOps, ImageFilter, ImageEnhance, UnidentifiedImageError import gradio as gr -from modules import images as imgutil +from modules import images from modules.infotext_utils import create_override_settings_dict, parse_generation_parameters from modules.processing import Processed, StableDiffusionProcessingImg2Img, process_images from modules.shared import opts, state @@ -21,7 +21,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal output_dir = output_dir.strip() processing.fix_seed(p) - images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff"))) + batch_images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff"))) is_inpaint_batch = False if inpaint_mask_dir: @@ -31,9 +31,9 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal if is_inpaint_batch: print(f"\nInpaint batch is enabled. 
{len(inpaint_masks)} masks found.") - print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.") + print(f"Will process {len(batch_images)} images, creating {p.n_iter * p.batch_size} new images for each.") - state.job_count = len(images) * p.n_iter + state.job_count = len(batch_images) * p.n_iter # extract "default" params to use in case getting png info fails prompt = p.prompt @@ -46,8 +46,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal sd_model_checkpoint_override = get_closet_checkpoint_match(override_settings.get("sd_model_checkpoint", None)) batch_results = None discard_further_results = False - for i, image in enumerate(images): - state.job = f"{i+1} out of {len(images)}" + for i, image in enumerate(batch_images): + state.job = f"{i+1} out of {len(batch_images)}" if state.skipped: state.skipped = False @@ -55,7 +55,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal break try: - img = Image.open(image) + img = images.read(image) except UnidentifiedImageError as e: print(e) continue @@ -86,7 +86,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal # otherwise user has many masks with the same name but different extensions mask_image_path = masks_found[0] - mask_image = Image.open(mask_image_path) + mask_image = images.read(mask_image_path) p.image_mask = mask_image if use_png_info: @@ -94,8 +94,8 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal info_img = img if png_info_dir: info_img_path = os.path.join(png_info_dir, os.path.basename(image)) - info_img = Image.open(info_img_path) - geninfo, _ = imgutil.read_info_from_image(info_img) + info_img = images.read(info_img_path) + geninfo, _ = images.read_info_from_image(info_img) parsed_parameters = parse_generation_parameters(geninfo) parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})} except Exception: @@ -175,9 +175,8 @@ def img2img(id_task: str, mode: int, prompt: str, negative_prompt: str, prompt_s image = None mask = None - # Use the EXIF orientation of photos taken by smartphones. 
- if image is not None: - image = ImageOps.exif_transpose(image) + image = images.fix_image(image) + mask = images.fix_image(mask) if selected_scale_tab == 1 and not is_batch: assert image, "Can't scale by because no image is selected" diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index e04a7bee9..a6de9db99 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -8,7 +8,7 @@ import sys import gradio as gr from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions +from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images from PIL import Image sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name @@ -83,7 +83,7 @@ def image_from_url_text(filedata): assert is_in_right_dir, 'trying to open image file outside of allowed directories' filename = filename.rsplit('?', 1)[0] - return Image.open(filename) + return images.read(filename) if type(filedata) == list: if len(filedata) == 0: @@ -95,7 +95,7 @@ def image_from_url_text(filedata): filedata = filedata[len("data:image/png;base64,"):] filedata = base64.decodebytes(filedata.encode('utf-8')) - image = Image.open(io.BytesIO(filedata)) + image = images.read(io.BytesIO(filedata)) return image diff --git a/modules/postprocessing.py b/modules/postprocessing.py index f14882321..754cc9e3a 100644 --- a/modules/postprocessing.py +++ b/modules/postprocessing.py @@ -17,10 +17,10 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if extras_mode == 1: for img in image_folder: if isinstance(img, Image.Image): - image = img + image = images.fix_image(img) fn = '' else: - image = Image.open(os.path.abspath(img.name)) + image = images.read(os.path.abspath(img.name)) fn = os.path.splitext(img.orig_name)[0] yield image, fn elif extras_mode == 2: @@ -56,7 +56,7 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir, if isinstance(image_placeholder, str): try: - image_data = Image.open(image_placeholder) + image_data = images.read(image_placeholder) except Exception: continue else: diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 7ee050615..84fb5df01 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -10,7 +10,7 @@ from random import shuffle, choices import random import tqdm -from modules import devices, shared +from modules import devices, shared, images import re from ldm.modules.distributions.distributions import DiagonalGaussianDistribution @@ -61,7 +61,7 @@ class PersonalizedBase(Dataset): if shared.state.interrupted: raise Exception("interrupted") try: - image = Image.open(path) + image = images.read(path) #Currently does not work for single color transparency #We would need to read image.info['transparency'] for that if use_weight and 'A' in image.getbands(): From 801461eea209d166e1b06714ea7eebd76f9e10dd Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 4 Mar 2024 18:33:22 -0500 Subject: [PATCH 64/69] Re-use profiler visualization for extra networks --- .eslintrc.js | 2 + javascript/extraNetworks.js | 62 +++++++++ javascript/profilerVisualization.js | 205 +++++++++++++++------------- 3 files changed, 177 insertions(+), 92 deletions(-) diff --git a/.eslintrc.js b/.eslintrc.js index 9c70eff85..2e7258f6b 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -78,6 +78,8 @@ module.exports 
= { //extraNetworks.js requestGet: "readonly", popup: "readonly", + // profilerVisualization.js + createVisualizationTable: "readonly", // from python localization: "readonly", // progrssbar.js diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index c21433db5..7b487af1c 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -528,12 +528,74 @@ function popupId(id) { popup(storedPopupIds[id]); } +function extraNetworksFlattenMetadata(obj) { + const result = {}; + + // Convert any stringified JSON objects to actual objects + for (const key of Object.keys(obj)) { + if (typeof obj[key] === 'string') { + try { + const parsed = JSON.parse(obj[key]); + if (parsed && typeof parsed === 'object') { + obj[key] = parsed; + } + } catch (error) { + continue; + } + } + } + + // Flatten the object + for (const key of Object.keys(obj)) { + if (typeof obj[key] === 'object' && obj[key] !== null) { + const nested = extraNetworksFlattenMetadata(obj[key]); + for (const nestedKey of Object.keys(nested)) { + result[`${key}/${nestedKey}`] = nested[nestedKey]; + } + } else { + result[key] = obj[key]; + } + } + + // Special case for handling modelspec keys + for (const key of Object.keys(result)) { + if (key.startsWith("modelspec.")) { + result[key.replaceAll(".", "/")] = result[key]; + delete result[key]; + } + } + + // Add empty keys to designate hierarchy + for (const key of Object.keys(result)) { + const parts = key.split("/"); + for (let i = 1; i < parts.length; i++) { + const parent = parts.slice(0, i).join("/"); + if (!result[parent]) { + result[parent] = ""; + } + } + } + + return result; +} + function extraNetworksShowMetadata(text) { + try { + let parsed = JSON.parse(text); + if (parsed && typeof parsed === 'object') { + parsed = extraNetworksFlattenMetadata(parsed); + const table = createVisualizationTable(parsed, 0); + popup(table); + return; + } + } catch (error) { console.debug(error); } + var elem = document.createElement('pre'); elem.classList.add('popup-metadata'); elem.textContent = text; popup(elem); + return; } function requestGet(url, data, handler, errorHandler) { diff --git a/javascript/profilerVisualization.js b/javascript/profilerVisualization.js index 9d8e5f42f..9822f4b2a 100644 --- a/javascript/profilerVisualization.js +++ b/javascript/profilerVisualization.js @@ -33,120 +33,141 @@ function createRow(table, cellName, items) { return res; } -function showProfile(path, cutoff = 0.05) { - requestGet(path, {}, function(data) { - var table = document.createElement('table'); - table.className = 'popup-table'; +function createVisualizationTable(data, cutoff = 0, sort = "") { + var table = document.createElement('table'); + table.className = 'popup-table'; - data.records['total'] = data.total; - var keys = Object.keys(data.records).sort(function(a, b) { - return data.records[b] - data.records[a]; + var keys = Object.keys(data); + if (sort === "number") { + keys = keys.sort(function(a, b) { + return data[b] - data[a]; }); - var items = keys.map(function(x) { - return {key: x, parts: x.split('/'), time: data.records[x]}; + } else { + keys = keys.sort(); + } + var items = keys.map(function(x) { + return {key: x, parts: x.split('/'), value: data[x]}; + }); + var maxLength = items.reduce(function(a, b) { + return Math.max(a, b.parts.length); + }, 0); + + var cols = createRow( + table, + 'th', + [ + cutoff === 0 ? 'key' : 'record', + cutoff === 0 ? 
'value' : 'seconds' + ] + ); + cols[0].colSpan = maxLength; + + function arraysEqual(a, b) { + return !(a < b || b < a); + } + + var addLevel = function(level, parent, hide) { + var matching = items.filter(function(x) { + return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent); }); - var maxLength = items.reduce(function(a, b) { - return Math.max(a, b.parts.length); - }, 0); - - var cols = createRow(table, 'th', ['record', 'seconds']); - cols[0].colSpan = maxLength; - - function arraysEqual(a, b) { - return !(a < b || b < a); + if (sort === "number") { + matching = matching.sort(function(a, b) { + return b.value - a.value; + }); + } else { + matching = matching.sort(); } + var othersTime = 0; + var othersList = []; + var othersRows = []; + var childrenRows = []; + matching.forEach(function(x) { + var visible = (cutoff === 0 && !hide) || (x.value >= cutoff && !hide); - var addLevel = function(level, parent, hide) { - var matching = items.filter(function(x) { - return x.parts[level] && !x.parts[level + 1] && arraysEqual(x.parts.slice(0, level), parent); - }); - var sorted = matching.sort(function(a, b) { - return b.time - a.time; - }); - var othersTime = 0; - var othersList = []; - var othersRows = []; - var childrenRows = []; - sorted.forEach(function(x) { - var visible = x.time >= cutoff && !hide; + var cells = []; + for (var i = 0; i < maxLength; i++) { + cells.push(x.parts[i]); + } + cells.push(cutoff === 0 ? x.value : x.value.toFixed(3)); + var cols = createRow(table, 'td', cells); + for (i = 0; i < level; i++) { + cols[i].className = 'muted'; + } - var cells = []; - for (var i = 0; i < maxLength; i++) { - cells.push(x.parts[i]); - } - cells.push(x.time.toFixed(3)); - var cols = createRow(table, 'td', cells); - for (i = 0; i < level; i++) { - cols[i].className = 'muted'; - } + var tr = cols[0].parentNode; + if (!visible) { + tr.classList.add("hidden"); + } - var tr = cols[0].parentNode; - if (!visible) { - tr.classList.add("hidden"); - } - - if (x.time >= cutoff) { - childrenRows.push(tr); - } else { - othersTime += x.time; - othersList.push(x.parts[level]); - othersRows.push(tr); - } - - var children = addLevel(level + 1, parent.concat([x.parts[level]]), true); - if (children.length > 0) { - var cell = cols[level]; - var onclick = function() { - cell.classList.remove("link"); - cell.removeEventListener("click", onclick); - children.forEach(function(x) { - x.classList.remove("hidden"); - }); - }; - cell.classList.add("link"); - cell.addEventListener("click", onclick); - } - }); - - if (othersTime > 0) { - var cells = []; - for (var i = 0; i < maxLength; i++) { - cells.push(parent[i]); - } - cells.push(othersTime.toFixed(3)); - cells[level] = 'others'; - var cols = createRow(table, 'td', cells); - for (i = 0; i < level; i++) { - cols[i].className = 'muted'; - } + if (cutoff === 0 || x.value >= cutoff) { + childrenRows.push(tr); + } else { + othersTime += x.value; + othersList.push(x.parts[level]); + othersRows.push(tr); + } + var children = addLevel(level + 1, parent.concat([x.parts[level]]), true); + if (children.length > 0) { var cell = cols[level]; - var tr = cell.parentNode; var onclick = function() { - tr.classList.add("hidden"); cell.classList.remove("link"); cell.removeEventListener("click", onclick); - othersRows.forEach(function(x) { + children.forEach(function(x) { x.classList.remove("hidden"); }); }; - - cell.title = othersList.join(", "); cell.classList.add("link"); cell.addEventListener("click", onclick); + } + }); - if (hide) { - 
tr.classList.add("hidden"); - } - - childrenRows.push(tr); + if (othersTime > 0) { + var cells = []; + for (var i = 0; i < maxLength; i++) { + cells.push(parent[i]); + } + cells.push(othersTime.toFixed(3)); + cells[level] = 'others'; + var cols = createRow(table, 'td', cells); + for (i = 0; i < level; i++) { + cols[i].className = 'muted'; } - return childrenRows; - }; + var cell = cols[level]; + var tr = cell.parentNode; + var onclick = function() { + tr.classList.add("hidden"); + cell.classList.remove("link"); + cell.removeEventListener("click", onclick); + othersRows.forEach(function(x) { + x.classList.remove("hidden"); + }); + }; - addLevel(0, []); + cell.title = othersList.join(", "); + cell.classList.add("link"); + cell.addEventListener("click", onclick); + if (hide) { + tr.classList.add("hidden"); + } + + childrenRows.push(tr); + } + + return childrenRows; + }; + + addLevel(0, []); + + return table; +} + +function showProfile(path, cutoff = 0.05) { + requestGet(path, {}, function(data) { + data.records['total'] = data.total; + const table = createVisualizationTable(data.records, cutoff, "number"); popup(table); }); } From ecffe8513e8ff10c58365d9d7c7d4dcbd3dc750a Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 4 Mar 2024 18:46:25 -0500 Subject: [PATCH 65/69] Lint --- javascript/extraNetworks.js | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 7b487af1c..584fd6c75 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -588,7 +588,9 @@ function extraNetworksShowMetadata(text) { popup(table); return; } - } catch (error) { console.debug(error); } + } catch (error) { + console.eror(error); + } var elem = document.createElement('pre'); elem.classList.add('popup-metadata'); From 706f63adfaf3c5442d181c7979bf5fbd2219f760 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Tue, 5 Mar 2024 12:23:44 +0900 Subject: [PATCH 66/69] fix extract_style_text_from_prompt #15132 --- modules/styles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/styles.py b/modules/styles.py index 60bd8a7fb..a9d8636a9 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -42,7 +42,7 @@ def extract_style_text_from_prompt(style_text, prompt): stripped_style_text = style_text.strip() if "{prompt}" in stripped_style_text: - left, right = stripped_style_text.split("{prompt}", 2) + left, _, right = stripped_style_text.partition("{prompt}") if stripped_prompt.startswith(left) and stripped_prompt.endswith(right): prompt = stripped_prompt[len(left):len(stripped_prompt)-len(right)] return True, prompt From 7785d484ae8a2e987bf56119b99f93841ce96675 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:50:53 -0500 Subject: [PATCH 67/69] Only override emphasis if actually used in prompt --- modules/infotext_utils.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index a6de9db99..db1866449 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -8,7 +8,7 @@ import sys import gradio as gr from modules.paths import data_path -from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images +from modules import shared, ui_tempdir, script_callbacks, processing, infotext_versions, images, prompt_parser from PIL import Image 
sys.modules['modules.generation_parameters_copypaste'] = sys.modules[__name__] # alias for old name @@ -356,7 +356,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable": res["Cache FP16 weight for LoRA"] = False - if "Emphasis" not in res: + prompt_attention = prompt_parser.parse_prompt_attention(prompt) + prompt_attention += prompt_parser.parse_prompt_attention(negative_prompt) + prompt_uses_emphasis = len(prompt_attention) != len([p for p in prompt_attention if p[1] == 1.0 or p[0] == 'BREAK']) + if "Emphasis" not in res and prompt_uses_emphasis: res["Emphasis"] = "Original" if "Refiner switch by sampling steps" not in res: From ed386c84b63b49402c4feb90ab466f9fb0781e37 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Tue, 5 Mar 2024 11:53:36 -0500 Subject: [PATCH 68/69] Fix emphasis infotext missing from `params.txt` --- modules/processing.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 411c7c3f4..93493f80e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -896,6 +896,10 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=p.prompts, seeds=p.seeds, subseeds=p.subseeds) + p.setup_conds() + + p.extra_generation_params.update(model_hijack.extra_generation_params) + # params.txt should be saved after scripts.process_batch, since the # infotext could be modified by that callback # Example: a wildcard processed by process_batch sets an extra model @@ -905,13 +909,9 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: processed = Processed(p, []) file.write(processed.infotext(p, 0)) - p.setup_conds() - for comment in model_hijack.comments: p.comment(comment) - p.extra_generation_params.update(model_hijack.extra_generation_params) - if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" From c1deec64cb89102f0efbb155845a6195fc696c89 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Wed, 6 Mar 2024 13:04:58 +0300 Subject: [PATCH 69/69] lint --- modules/api/api.py | 2 +- modules/textual_inversion/dataset.py | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 0630e77e8..29fa0011a 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -23,7 +23,7 @@ from modules.shared import opts from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork -from PIL import PngImagePlugin, Image +from PIL import PngImagePlugin from modules.sd_models_config import find_checkpoint_config_near_filename from modules.realesrgan_model import get_realesrgan_models from modules import devices diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 84fb5df01..71c032df7 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -2,7 +2,6 @@ import os import numpy as np import PIL import torch -from PIL import Image from torch.utils.data import Dataset, DataLoader, Sampler from torchvision import transforms from collections import defaultdict