from collections import namedtuple

import numpy as np
import torch
from PIL import Image

from modules import devices, images, sd_vae_approx, sd_samplers, sd_vae_taesd, shared
from modules.shared import opts, state
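

# one entry in the list of available samplers: the name shown in the UI, a constructor
# that builds the sampler object, any alternative names it can be selected by, and a dict of options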
SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
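

# img2img only runs part of the denoising schedule; this returns (steps, t_enc), where steps is
# the length of the full schedule and t_enc is how many of those steps will actually be run,
# based on denoising_strength. With opts.img2img_fix_steps (or an explicit steps argument) the
# schedule is stretched so that the requested number of steps is what actually gets executed.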
def setup_img2img_steps(p, steps=None):
    if opts.img2img_fix_steps or steps is not None:
        requested_steps = (steps or p.steps)
        steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0
        t_enc = requested_steps - 1
    else:
        steps = p.steps
        t_enc = int(min(p.denoising_strength, 0.999) * steps)

    return steps, t_enc
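

# maps the option values for the preview/VAE method settings to the `approximation` argument
# understood by samples_to_images_tensor and images_tensor_to_samples below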
approximation_indexes = {"Full": 0, "Approx NN": 1, "Approx cheap": 2, "TAESD": 3}
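

# `approximation` picks the decoder: 0 = full VAE, 1 = Approx NN, 2 = Approx cheap, 3 = TAESD
# (see approximation_indexes); None falls back to the live preview method from settings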
def samples_to_images_tensor(sample, approximation=None, model=None):
    '''latents -> images [-1, 1]'''
    if approximation is None:
        approximation = approximation_indexes.get(opts.show_progress_type, 0)

    if approximation == 2:
        x_sample = sd_vae_approx.cheap_approximation(sample)
    elif approximation == 1:
        x_sample = sd_vae_approx.model()(sample.to(devices.device, devices.dtype)).detach()
    elif approximation == 3:
        x_sample = sample * 1.5  # latents are scaled by 1.5 before going into TAESD
        x_sample = sd_vae_taesd.decoder_model()(x_sample.to(devices.device, devices.dtype)).detach()
        x_sample = x_sample * 2 - 1  # TAESD decodes to [0, 1]; shift to [-1, 1] like the other decoders
    else:
        if model is None:
            model = shared.sd_model
        x_sample = model.decode_first_stage(sample)

    return x_sample
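

# decode a single latent into a PIL image: map the [-1, 1] decoder output to [0, 1],
# clamp, move channels last and convert to uint8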
def single_sample_to_image(sample, approximation=None):
    x_sample = samples_to_images_tensor(sample.unsqueeze(0), approximation)[0] * 0.5 + 0.5

    x_sample = torch.clamp(x_sample, min=0.0, max=1.0)
    x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
    x_sample = x_sample.astype(np.uint8)

    return Image.fromarray(x_sample)
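

# decode latents with the model's own first-stage VAE, cast to the configured VAE dtype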
def decode_first_stage(model, x):
    x = model.decode_first_stage(x.to(devices.dtype_vae))

    return x
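

# decode the latent at `index` from a batch of samples into a PIL image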
def sample_to_image(samples, index=0, approximation=None):
    return single_sample_to_image(samples[index], approximation)
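

# decode a whole batch of latents and arrange the resulting images into a single grid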
def samples_to_image_grid(samples, approximation=None):
    return images.image_grid([single_sample_to_image(sample, approximation) for sample in samples])
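

# approximation == 3 encodes with TAESD; anything else goes through the model's first-stage
# VAE encoder. None falls back to the opts.sd_vae_encode_method setting.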
def images_tensor_to_samples(image, approximation=None, model=None):
    '''image[0, 1] -> latent'''
    if approximation is None:
        approximation = approximation_indexes.get(opts.sd_vae_encode_method, 0)

    if approximation == 3:
        image = image.to(devices.device, devices.dtype)
        x_latent = sd_vae_taesd.encoder_model()(image) / 1.5  # inverse of the 1.5 scaling applied when decoding
    else:
        if model is None:
            model = shared.sd_model
        image = image.to(shared.device, dtype=devices.dtype_vae)
        image = image * 2 - 1  # the first-stage VAE expects inputs in [-1, 1]
        x_latent = model.get_first_stage_encoding(model.encode_first_stage(image))

    return x_latent
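

# remember the latest latent for live previews and, if previews are enabled and this is a
# preview step, decode it to an image immediately (only when parallel processing is not
# allowed; otherwise the preview is produced on demand from state.current_latent)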
def store_latent(decoded):
    state.current_latent = decoded

    if opts.live_previews_enable and opts.show_progress_every_n_steps > 0 and shared.state.sampling_step % opts.show_progress_every_n_steps == 0:
        if not shared.parallel_processing_allowed:
            shared.state.assign_current_image(sample_to_image(decoded))
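

# eta is resolved in order: value set on the processing object, the sampler's own eta,
# then the sampler config's default; ENSD can only apply to samplers that use eta > 0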
def is_sampler_using_eta_noise_seed_delta(p):
    """returns whether the sampler from config will use eta noise seed delta for image creation"""

    sampler_config = sd_samplers.find_sampler_config(p.sampler_name)

    eta = p.eta

    if eta is None and p.sampler is not None:
        eta = p.sampler.eta

    if eta is None and sampler_config is not None:
        eta = 0 if sampler_config.options.get("default_eta_is_0", False) else 1.0

    if eta == 0:
        return False

    return sampler_config.options.get("uses_ensd", False)
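

# raised from sampler callbacks to stop generation when the user interrupts; it derives from
# BaseException so that blanket `except Exception` handlers do not swallow it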
class InterruptedException(BaseException):
    pass
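

# route torchsde's Brownian noise generation through devices.randn_local so that SDE samplers
# draw seed-reproducible noise following this project's RNG conventions instead of torchsde's
# own generator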
def replace_torchsde_browinan():
    import torchsde._brownian.brownian_interval

    def torchsde_randn(size, dtype, device, seed):
        return devices.randn_local(seed, size).to(device=device, dtype=dtype)

    torchsde._brownian.brownian_interval._randn = torchsde_randn


replace_torchsde_browinan()