Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
run a basic torch calculation at startup, in parallel, to reduce the performance impact of the first generation
parent 1f3182924b
commit 8faac8b963
modules/devices.py

@@ -1,5 +1,7 @@
 import sys
 import contextlib
+from functools import lru_cache
+
 import torch
 from modules import errors
 
@@ -154,3 +156,19 @@ def test_for_nans(x, where):
     message += " Use --disable-nan-check commandline argument to disable this check."
 
     raise NansException(message)
+
+
+@lru_cache
+def first_time_calculation():
+    """
+    Just do any calculation with pytorch layers - the first time this is done it allocates about 700MB of memory and
+    spends about 2.7 seconds doing that, at least with NVidia.
+    """
+
+    x = torch.zeros((1, 1)).to(device, dtype)
+    linear = torch.nn.Linear(1, 1).to(device, dtype)
+    linear(x)
+
+    x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
+    conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
+    conv2d(x)
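Why this helps: the very first tensor operation on a GPU pays one-time costs (CUDA context creation, cuBLAS/cuDNN initialization, allocator setup), and @lru_cache on a zero-argument function turns every later call into a cached no-op. Below is a minimal standalone sketch of the same pattern; the device/dtype setup and the warm_up name are stand-ins for the module-level device and dtype that modules/devices.py already defines, not part of this commit.

import time
from functools import lru_cache

import torch

# Stand-ins for modules.devices.device / modules.devices.dtype (assumed
# here for a self-contained sketch; the webui configures the real values).
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
dtype = torch.float16 if device.type == "cuda" else torch.float32


@lru_cache
def warm_up():
    # The first forward pass through any layer triggers the one-time
    # CUDA/cuBLAS/cuDNN initialization; later passes are cheap.
    x = torch.zeros((1, 1)).to(device, dtype)
    torch.nn.Linear(1, 1).to(device, dtype)(x)

    x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
    torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)(x)


for label in ("first call", "second call"):
    start = time.time()
    warm_up()  # the second call returns immediately: lru_cache memoizes it
    print(f"{label}: {time.time() - start:.3f}s")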
webui.py
@@ -20,7 +20,7 @@ import logging
 
 logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
 
-from modules import paths, timer, import_hook, errors  # noqa: F401
+from modules import paths, timer, import_hook, errors, devices  # noqa: F401
 
 startup_timer = timer.Timer()
 
@@ -295,6 +295,8 @@ def initialize_rest(*, reload_script_modules=False):
     # (when reloading, this does nothing)
     Thread(target=lambda: shared.sd_model).start()
 
+    Thread(target=devices.first_time_calculation).start()
+
     shared.reload_hypernetworks()
     startup_timer.record("reload hypernetworks")
 
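The wiring in webui.py is fire-and-forget, condensed below for clarity. Nothing joins the thread: if the warm-up finishes before the first image generation, that generation skips the roughly 2.7-second one-time cost; if not, nothing breaks, the work just overlaps less. Note that functools.lru_cache does not lock around the wrapped call, so two simultaneous first calls could both execute the body; that is harmless here because only this single startup thread triggers it.

from threading import Thread

from modules import devices

# Fire-and-forget: warm-up runs concurrently with model loading and the
# rest of startup, instead of delaying the first generation.
Thread(target=devices.first_time_calculation).start()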