Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
(synced 2024-11-27 06:40:10 +08:00)

Commit b6af0a3809: Merge branch 'release_candidate'

CHANGELOG.md (+14 lines):
@@ -1,3 +1,17 @@
+## 1.3.1
+
+### Features:
+ * revert default cross attention optimization to Doggettx
+
+### Bug Fixes:
+ * fix bug: LoRA does not apply when selected from the sd_lora dropdown list
+ * fix png info always being added even if the setting is not enabled
+ * fix some fields not applying in the xyz plot
+ * fix "hires. fix" prompt sharing the same labels as txt2img_prompt
+ * fix lora hashes not being added properly to infotext if there is only one lora
+ * fix --use-cpu failing to work properly at startup
+ * make the --disable-opt-split-attention command line option work again
+
 ## 1.3.0
 
 ### Features:

modules/cmd_args.py:

@@ -62,7 +62,7 @@ parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="prefer InvokeAI's cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
 parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
 parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="does not do anything")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
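Context: these are plain argparse switches. A standalone sketch (not webui code, just the same argparse idioms) showing how the corrected flag and `--use-cpu` parse:

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--disable-opt-split-attention", action='store_true',
                    help="prefer no cross-attention layer optimization for automatic choice of optimization")
parser.add_argument("--use-cpu", nargs='+', default=[], type=str.lower,
                    help="use CPU as torch device for specified modules")

# nargs='+' collects one or more values; type=str.lower normalizes each of them.
args = parser.parse_args(["--use-cpu", "SD", "Interrogate"])
print(args.use_cpu)                       # ['sd', 'interrogate']
print(args.disable_opt_split_attention)   # False
```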

modules/extra_networks.py:

@@ -26,7 +26,7 @@ class ExtraNetworkParams:
         self.named = {}
 
         for item in self.items:
-            parts = item.split('=', 2)
+            parts = item.split('=', 2) if isinstance(item, str) else [item]
             if len(parts) == 2:
                 self.named[parts[0]] = parts[1]
             else:
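The `isinstance` guard matters because extra-network arguments are not always strings. A quick standalone illustration of both branches:

```python
# Standalone illustration of the fixed parsing logic (not the webui class itself).
def parse_item(item):
    # Strings split into at most key=value parts; anything else passes through
    # as a single positional part, which is what the isinstance guard enables.
    return item.split('=', 2) if isinstance(item, str) else [item]

print(parse_item("te=0.5"))   # ['te', '0.5'] -> becomes a named argument
print(parse_item("0.75"))     # ['0.75']      -> stays positional
print(parse_item(0.75))       # [0.75]        -> no AttributeError on a float
```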

modules/generation_parameters_copypaste.py:

@@ -35,7 +35,7 @@ def reset():
 
 
 def quote(text):
-    if ',' not in str(text) and '\n' not in str(text):
+    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
         return text
 
     return json.dumps(text, ensure_ascii=False)
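Adding `':'` to the guard means any value containing a colon now gets JSON-quoted, so infotext readers cannot mistake it for a `key: value` separator. The updated function, exercised standalone:

```python
import json

def quote(text):
    # Values containing a comma, newline, or (newly) a colon must be quoted,
    # since all three act as delimiters in the generation-parameters infotext.
    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
        return text
    return json.dumps(text, ensure_ascii=False)

print(quote("simple"))   # simple
print(quote("a:b"))      # "a:b"  (JSON-quoted after this change)
print(quote("x,y"))      # "x,y"
```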

modules/images.py:

@@ -493,9 +493,12 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None):
         existing_pnginfo['parameters'] = geninfo
 
     if extension.lower() == '.png':
-        pnginfo_data = PngImagePlugin.PngInfo()
-        for k, v in (existing_pnginfo or {}).items():
-            pnginfo_data.add_text(k, str(v))
+        if opts.enable_pnginfo:
+            pnginfo_data = PngImagePlugin.PngInfo()
+            for k, v in (existing_pnginfo or {}).items():
+                pnginfo_data.add_text(k, str(v))
+        else:
+            pnginfo_data = None
 
         image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
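The fix gates the PNG text chunks behind `opts.enable_pnginfo` instead of always writing them. A minimal standalone Pillow sketch of the same mechanism (`enable_pnginfo` here is a stand-in for the webui setting):

```python
from PIL import Image, PngImagePlugin

enable_pnginfo = True          # stand-in for opts.enable_pnginfo
geninfo = "a cat, Steps: 20"

image = Image.new("RGB", (64, 64))
if enable_pnginfo:
    pnginfo_data = PngImagePlugin.PngInfo()
    pnginfo_data.add_text("parameters", geninfo)  # tEXt chunk, readable by PNG info tools
else:
    pnginfo_data = None        # passing None writes the file without metadata

image.save("out.png", format="PNG", pnginfo=pnginfo_data)
print(Image.open("out.png").info.get("parameters"))  # the geninfo string, or None when disabled
```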

modules/processing.py:

@@ -321,14 +321,13 @@ class StableDiffusionProcessing:
         have been used before. The second element is where the previously
         computed result is stored.
         """
-
-        if cache[0] is not None and (required_prompts, steps) == cache[0]:
+        if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]:
             return cache[1]
 
         with devices.autocast():
             cache[1] = function(shared.sd_model, required_prompts, steps)
 
-        cache[0] = (required_prompts, steps)
+        cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info)
         return cache[1]
 
     def setup_conds(self):
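This was a stale-cache bug: cached conds were reused even after the CLIP-skip setting or the checkpoint changed, because neither was part of the cache key. A generic standalone sketch of the tuple-keyed pattern (all names here are hypothetical):

```python
# Generic sketch of the tuple-keyed memoization pattern used above. Every
# input that can change the result must appear in the key, or the cache
# serves stale values when only the missing input changes.
cache = [None, None]   # [key, value]

def cached_compute(function, prompts, steps, clip_skip, checkpoint):
    key = (prompts, steps, clip_skip, checkpoint)
    if cache[0] == key:
        return cache[1]
    cache[1] = function(prompts, steps, clip_skip, checkpoint)
    cache[0] = key
    return cache[1]

compute = lambda *a: f"conds for {a}"
print(cached_compute(compute, ("cat",), 20, 1, "model-a"))  # computed
print(cached_compute(compute, ("cat",), 20, 2, "model-a"))  # recomputed: clip_skip changed
```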

modules/sd_hijack.py:

@@ -68,6 +68,8 @@ def apply_optimizations():
 
     if selection == "None":
         matching_optimizer = None
+    elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+        matching_optimizer = None
     elif matching_optimizer is None:
         matching_optimizer = optimizers[0]
 
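With the new branch, `--disable-opt-split-attention` takes effect again under the default "Automatic" selection instead of being silently ignored. A standalone sketch of the selection order (function and arguments are hypothetical stand-ins):

```python
# Hypothetical stand-in for the selection logic above.
def pick_optimizer(selection, disable_flag, matching_optimizer, optimizers):
    if selection == "None":
        return None                  # user explicitly opted out
    if selection == "Automatic" and disable_flag:
        return None                  # restored: the CLI flag disables it too
    if matching_optimizer is None:
        return optimizers[0]         # fall back to the highest-priority entry
    return matching_optimizer

print(pick_optimizer("Automatic", True, None, ["Doggettx"]))   # None
print(pick_optimizer("Automatic", False, None, ["Doggettx"]))  # Doggettx
```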

modules/sd_hijack_optimizations.py:

@@ -59,7 +59,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)
@@ -73,7 +73,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -116,7 +116,7 @@ class SdOptimizationInvokeAI(SdOptimization):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
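Taken together, the three priority edits re-rank the candidates as Doggettx (90) > sdp-no-mem (80) > sdp (70). Assuming automatic selection picks the highest-priority available optimizer, which is what the reordering implies, Doggettx becomes the default again, matching the changelog entry. A toy sketch of that ranking:

```python
# Toy ranking model; the names and the three new priority values come from
# the diff above, while the InvokeAI value is a hypothetical placeholder.
optimizers = [
    {"name": "sdp-no-mem", "priority": 80},
    {"name": "sdp", "priority": 70},
    {"name": "Doggettx", "priority": 90},
    {"name": "InvokeAI", "priority": 10},  # hypothetical placeholder
]
optimizers.sort(key=lambda o: o["priority"], reverse=True)
print([o["name"] for o in optimizers])  # ['Doggettx', 'sdp-no-mem', 'sdp', 'InvokeAI']
```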

modules/sd_models.py:

@@ -313,8 +313,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
 
     timer.record("apply half()")
 
-    devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
-    devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
     devices.dtype_unet = model.model.diffusion_model.dtype
     devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
 

modules/shared.py:

@@ -6,6 +6,7 @@ import threading
 import time
 
 import gradio as gr
+import torch
 import tqdm
 
 import modules.interrogate
@@ -76,6 +77,9 @@ cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access
 devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
     (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
 
+devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
 device = devices.device
 weight_load_location = None if cmd_opts.lowram else "cpu"
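Moving these assignments out of model loading and into shared.py makes them run once at startup, before anything reads `devices.dtype`; this is the `--use-cpu` startup fix from the changelog. A standalone sketch of the flag-to-dtype/device mapping (the flag values are hypothetical):

```python
# Standalone sketch of the startup device/dtype selection (hypothetical flags).
import torch

class cmd_opts:                # stand-in for the parsed argparse namespace
    no_half = False
    no_half_vae = True
    use_cpu = ["interrogate"]  # modules forced onto the CPU

dtype = torch.float32 if cmd_opts.no_half else torch.float16
dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16

# Each named module goes to CPU if listed (or if 'all' is given).
modules = ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer']
devices = ["cpu" if any(y in cmd_opts.use_cpu for y in [m, 'all']) else "cuda" for m in modules]

print(dtype, dtype_vae)             # torch.float16 torch.float32
print(dict(zip(modules, devices)))  # interrogate -> cpu, the rest -> cuda
```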

modules/ui.py:

@@ -505,10 +505,10 @@ def create_ui():
                     with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
                         with gr.Column(scale=80):
                             with gr.Row():
-                                hr_prompt = gr.Textbox(label="Prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+                                hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
                         with gr.Column(scale=80):
                             with gr.Row():
-                                hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+                                hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
 
                 elif category == "batch":
                     if not opts.dimensions_and_batch_together:
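The renamed labels matter because fields are matched by label when parameters are pasted back, so the hires prompts no longer collide with the main txt2img prompt. A minimal standalone Gradio sketch of a labeled but visually unlabeled textbox (assumes Gradio 3.x, the version the webui used at the time):

```python
import gradio as gr

with gr.Blocks() as demo:
    # show_label=False hides the label in the UI, but the label string still
    # uniquely identifies the field (e.g. for paste/infotext matching).
    hr_prompt = gr.Textbox(label="Hires prompt", show_label=False, lines=3,
                           placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.")

demo.launch()
```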