From 4b8a192f680101de247dca79e48974b53bf961fe Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Sat, 29 Oct 2022 16:36:43 +0900
Subject: [PATCH 01/27] add optimizer save option to shared.opts

---
 modules/shared.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/shared.py b/modules/shared.py
index e4f163c11..065b893d4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -286,6 +286,7 @@ options_templates.update(options_section(('system', "System"), {
 
 options_templates.update(options_section(('training', "Training"), {
     "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
+    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
     "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
     "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
     "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),

From 20194fd9752a280306fb66b57b258609b0918c46 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Sat, 29 Oct 2022 16:56:42 +0900
Subject: [PATCH 02/27] We have a duplicate linear now

---
 modules/hypernetworks/ui.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/hypernetworks/ui.py b/modules/hypernetworks/ui.py
index aad09ffc4..c2d4b51c5 100644
--- a/modules/hypernetworks/ui.py
+++ b/modules/hypernetworks/ui.py
@@ -9,7 +9,7 @@ from modules import devices, sd_hijack, shared
 from modules.hypernetworks import hypernetwork
 
 not_available = ["hardswish", "multiheadattention"]
-keys = ["linear"] + list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
+keys = list(x for x in hypernetwork.HypernetworkModule.activation_dict.keys() if x not in not_available)
 
 def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
     # Remove illegal characters from name.
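
The dropdown keys were previously built by prepending "linear" to the names scanned
from activation_dict; since that dict already carries a "linear" entry, the
concatenation listed it twice. A self-contained reproduction with a stand-in dict:

    activation_dict = {"linear": None, "relu": None, "tanh": None}  # stand-in
    not_available = ["hardswish", "multiheadattention"]

    old_keys = ["linear"] + [x for x in activation_dict.keys() if x not in not_available]
    new_keys = [x for x in activation_dict.keys() if x not in not_available]

    assert old_keys.count("linear") == 2  # the duplicate this patch removes
    assert new_keys.count("linear") == 1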

From 9d96d7d0a0aa0a966a9aefd24342345eb65952ed Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Sun, 30 Oct 2022 20:39:04 +0900
Subject: [PATCH 03/27] resolve conflicts

---
 modules/hypernetworks/hypernetwork.py | 44 +++++++++++++++++++++++----
 1 file changed, 38 insertions(+), 6 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index a11e01d6f..8f74cdeae 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -21,6 +21,7 @@ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_norm
 from collections import defaultdict, deque
 from statistics import stdev, mean
 
+optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
 
 class HypernetworkModule(torch.nn.Module):
     multiplier = 1.0
@@ -139,6 +140,8 @@ class Hypernetwork:
         self.weight_init = weight_init
         self.add_layer_norm = add_layer_norm
         self.use_dropout = use_dropout
+        self.optimizer_name = None
+        self.optimizer_state_dict = None
 
         for size in enable_sizes or []:
             self.layers[size] = (
@@ -171,6 +174,10 @@ class Hypernetwork:
         state_dict['use_dropout'] = self.use_dropout
         state_dict['sd_checkpoint'] = self.sd_checkpoint
         state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
+        if self.optimizer_name is not None:
+            state_dict['optimizer_name'] = self.optimizer_name
+        if self.optimizer_state_dict:
+            state_dict['optimizer_state_dict'] = self.optimizer_state_dict
 
         torch.save(state_dict, filename)
 
@@ -190,7 +197,14 @@ class Hypernetwork:
         self.add_layer_norm = state_dict.get('is_layer_norm', False)
         print(f"Layer norm is set to {self.add_layer_norm}")
         self.use_dropout = state_dict.get('use_dropout', False)
-        print(f"Dropout usage is set to {self.use_dropout}" )
+        print(f"Dropout usage is set to {self.use_dropout}")
+        self.optimizer_name = state_dict.get('optimizer_name', 'AdamW')
+        print(f"Optimizer name is {self.optimizer_name}")
+        self.optimizer_state_dict = state_dict.get('optimizer_state_dict', None)
+        if self.optimizer_state_dict:
+            print("Loaded existing optimizer from checkpoint")
+        else:
+            print("No saved optimizer exists in checkpoint")
 
         for size, sd in state_dict.items():
             if type(size) == int:
@@ -392,8 +406,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True
-    # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
-    optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
+    # Here we use optimizer from saved HN, or we can specify as UI option.
+    if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+    else:
+        print(f"Optimizer type {optimizer_name} is not defined!")
+        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+        optimizer_name = 'AdamW'
+    if hypernetwork.optimizer_state_dict:  # This line must be changed if Optimizer type can be different from saved optimizer.
+        try:
+            optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+        except RuntimeError as e:
+            print("Cannot resume from saved optimizer!")
+            print(e)
 
     steps_without_grad = 0
 
@@ -455,8 +480,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             # Before saving, change name to match current checkpoint.
             hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
             last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+            hypernetwork.optimizer_name = optimizer_name
+            if shared.opts.save_optimizer_state:
+                hypernetwork.optimizer_state_dict = optimizer.state_dict()
             save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
-
+            hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
         textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
             "loss": f"{previous_mean_loss:.7f}",
             "learn_rate": scheduler.learn_rate
@@ -514,14 +542,18 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
 Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
 """
-        
     report_statistics(loss_dict)
 
     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+    hypernetwork.optimizer_name = optimizer_name
+    if shared.opts.save_optimizer_state:
+        hypernetwork.optimizer_state_dict = optimizer.state_dict()
     save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
-
+    del optimizer
+    hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
     return hypernetwork, filename
 
+
 def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
     old_hypernetwork_name = hypernetwork.name
     old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
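
The core of this patch is the optimizer registry plus the resume path: every class
in torch.optim except the abstract base is collected by name, the saved name selects
the class with AdamW as the fallback, and a saved state dict is loaded back when one
is present. A runnable sketch of the same flow, with toy parameters standing in for
hypernetwork.weights() and a fixed learning rate in place of the scheduler:

    import inspect

    import torch

    optimizer_dict = {optim_name: cls_obj for optim_name, cls_obj
                      in inspect.getmembers(torch.optim, inspect.isclass)
                      if optim_name != "Optimizer"}

    weights = [torch.zeros(4, requires_grad=True)]  # stand-in for hypernetwork.weights()

    optimizer_name = "AdamW"  # stand-in for hypernetwork.optimizer_name
    if optimizer_name in optimizer_dict:
        optimizer = optimizer_dict[optimizer_name](params=weights, lr=1e-3)
    else:
        print(f"Optimizer type {optimizer_name} is not defined!")
        optimizer = torch.optim.AdamW(params=weights, lr=1e-3)

    saved_state = optimizer.state_dict()  # stand-in for hypernetwork.optimizer_state_dict
    try:
        optimizer.load_state_dict(saved_state)
    except RuntimeError as e:  # mirrors the patch; a mismatched state fails to load
        print("Cannot resume from saved optimizer!")
        print(e)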

From 3178c35224467893cf8dcedb1028c59c6c23db58 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:16:32 +0900
Subject: [PATCH 04/27] resolve conflicts

---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index 065b893d4..959937d78 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -285,7 +285,7 @@ options_templates.update(options_section(('system', "System"), {
 }))
 
 options_templates.update(options_section(('training', "Training"), {
-    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training hypernetwork. Saves VRAM."),
+    "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
     "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
     "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
     "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),

From 9b5f85ac83f864310fe19c9deab6670bad695b0d Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:18:04 +0900
Subject: [PATCH 05/27] first revert

---
 modules/shared.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index 959937d78..7e8c552b0 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -286,7 +286,6 @@ options_templates.update(options_section(('system', "System"), {
 
 options_templates.update(options_section(('training', "Training"), {
     "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
-    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
     "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
     "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
     "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),

From 7ea5956ad5fa925f92116e8a3bf78d7f6517b654 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Wed, 2 Nov 2022 22:18:55 +0900
Subject: [PATCH 06/27] now add

---
 modules/shared.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/modules/shared.py b/modules/shared.py
index d8e99f857..7ecb40d83 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -309,6 +309,7 @@ options_templates.update(options_section(('system', "System"), {
 
 options_templates.update(options_section(('training', "Training"), {
     "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
+    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
     "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
     "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
     "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),

From 0b143c1163a96b193a4e8512be9c5831c661a50d Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Thu, 3 Nov 2022 14:30:53 +0900
Subject: [PATCH 07/27] Separate .optim file from model

---
 modules/hypernetworks/hypernetwork.py | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 8f74cdeae..63c25de8b 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -161,6 +161,7 @@ class Hypernetwork:
 
     def save(self, filename):
         state_dict = {}
+        optimizer_saved_dict = {}
 
         for k, v in self.layers.items():
             state_dict[k] = (v[0].state_dict(), v[1].state_dict())
@@ -175,9 +176,10 @@ class Hypernetwork:
         state_dict['sd_checkpoint'] = self.sd_checkpoint
         state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
         if self.optimizer_name is not None:
-            state_dict['optimizer_name'] = self.optimizer_name
+            optimizer_saved_dict['optimizer_name'] = self.optimizer_name
         if self.optimizer_state_dict:
-            state_dict['optimizer_state_dict'] = self.optimizer_state_dict
+            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+            torch.save(optimizer_saved_dict, filename + '.optim')
 
         torch.save(state_dict, filename)
 
@@ -198,9 +200,11 @@ class Hypernetwork:
         print(f"Layer norm is set to {self.add_layer_norm}")
         self.use_dropout = state_dict.get('use_dropout', False)
         print(f"Dropout usage is set to {self.use_dropout}")
-        self.optimizer_name = state_dict.get('optimizer_name', 'AdamW')
+
+        optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
+        self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
         print(f"Optimizer name is {self.optimizer_name}")
-        self.optimizer_state_dict = state_dict.get('optimizer_state_dict', None)
+        self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
         if self.optimizer_state_dict:
             print("Loaded existing optimizer from checkpoint")
         else:

From 1764ac3c8bc482bd575987850e96630d9115e51a Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Thu, 3 Nov 2022 14:49:26 +0900
Subject: [PATCH 08/27] use hash to check valid optim

---
 modules/hypernetworks/hypernetwork.py | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 63c25de8b..4230b8cfb 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -177,11 +177,12 @@ class Hypernetwork:
         state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
         if self.optimizer_name is not None:
             optimizer_saved_dict['optimizer_name'] = self.optimizer_name
-        if self.optimizer_state_dict:
-            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
-            torch.save(optimizer_saved_dict, filename + '.optim')
 
         torch.save(state_dict, filename)
+        if self.optimizer_state_dict:
+            optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
+            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+            torch.save(optimizer_saved_dict, filename + '.optim')
 
     def load(self, filename):
         self.filename = filename
@@ -204,7 +205,10 @@ class Hypernetwork:
         optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
         self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
         print(f"Optimizer name is {self.optimizer_name}")
-        self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+        if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
+            self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+        else:
+            self.optimizer_state_dict = None
         if self.optimizer_state_dict:
             print("Loaded existing optimizer from checkpoint")
         else:
@@ -229,7 +233,7 @@ def list_hypernetworks(path):
         name = os.path.splitext(os.path.basename(filename))[0]
         # Prevent a hypothetical "None.pt" from being listed.
         if name != "None":
-            res[name] = filename
+            res[name + f"({sd_models.model_hash(filename)})"] = filename
     return res
 
 
@@ -375,6 +379,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     else:
         hypernetwork_dir = None
 
+    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
     if create_image_every > 0:
         images_dir = os.path.join(log_directory, "images")
         os.makedirs(images_dir, exist_ok=True)
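
Because the .optim file is written separately, it can go stale if the .pt is
replaced; this patch stamps the .optim payload with a short hash of the freshly
saved checkpoint and discards the state on load when the hashes disagree. The
pairing logic, with a stand-in for sd_models.model_hash (the real helper computes
a short content hash of the checkpoint file):

    import hashlib

    def fake_model_hash(filename):
        # Stand-in for modules.sd_models.model_hash.
        with open(filename, "rb") as f:
            return hashlib.sha256(f.read()).hexdigest()[:8]

    with open("hn.pt", "wb") as f:
        f.write(b"checkpoint bytes")

    # Save side: record the hash of the file that was just written.
    optimizer_saved_dict = {"hash": fake_model_hash("hn.pt"), "optimizer_state_dict": {}}

    # Load side: accept the optimizer state only if the checkpoint still matches.
    if fake_model_hash("hn.pt") == optimizer_saved_dict.get("hash", None):
        optimizer_state_dict = optimizer_saved_dict.get("optimizer_state_dict", None)
    else:
        optimizer_state_dict = None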

From 0abb39f461baa343ae7c23abffb261e57c3168d4 Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 15:47:19 +0900
Subject: [PATCH 09/27] resolve conflict - first revert

---
 modules/hypernetworks/hypernetwork.py | 123 +++++++++++---------------
 1 file changed, 52 insertions(+), 71 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 4230b8cfb..674fcedd0 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -21,7 +21,6 @@ from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_norm
 from collections import defaultdict, deque
 from statistics import stdev, mean
 
-optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
 
 class HypernetworkModule(torch.nn.Module):
     multiplier = 1.0
@@ -34,9 +33,12 @@ class HypernetworkModule(torch.nn.Module):
         "tanh": torch.nn.Tanh,
         "sigmoid": torch.nn.Sigmoid,
     }
-    activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
+    activation_dict.update(
+        {cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if
+         inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
 
-    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
+    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
+                 add_layer_norm=False, use_dropout=False):
         super().__init__()
 
         assert layer_structure is not None, "layer_structure must not be None"
@@ -47,7 +49,7 @@ class HypernetworkModule(torch.nn.Module):
         for i in range(len(layer_structure) - 1):
 
             # Add a fully-connected layer
-            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
+            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i + 1])))
 
             # Add an activation func
             if activation_func == "linear" or activation_func is None:
@@ -59,7 +61,7 @@ class HypernetworkModule(torch.nn.Module):
 
             # Add layer normalization
             if add_layer_norm:
-                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
+                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i + 1])))
 
             # Add dropout expect last layer
             if use_dropout and i < len(layer_structure) - 3:
@@ -128,7 +130,8 @@ class Hypernetwork:
     filename = None
     name = None
 
-    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
+    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None,
+                 add_layer_norm=False, use_dropout=False):
         self.filename = None
         self.name = name
         self.layers = {}
@@ -140,13 +143,13 @@ class Hypernetwork:
         self.weight_init = weight_init
         self.add_layer_norm = add_layer_norm
         self.use_dropout = use_dropout
-        self.optimizer_name = None
-        self.optimizer_state_dict = None
 
         for size in enable_sizes or []:
             self.layers[size] = (
-                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
-                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+                                   self.add_layer_norm, self.use_dropout),
+                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
+                                   self.add_layer_norm, self.use_dropout),
             )
 
     def weights(self):
@@ -161,7 +164,6 @@ class Hypernetwork:
 
     def save(self, filename):
         state_dict = {}
-        optimizer_saved_dict = {}
 
         for k, v in self.layers.items():
             state_dict[k] = (v[0].state_dict(), v[1].state_dict())
@@ -175,14 +177,8 @@ class Hypernetwork:
         state_dict['use_dropout'] = self.use_dropout
         state_dict['sd_checkpoint'] = self.sd_checkpoint
         state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
-        if self.optimizer_name is not None:
-            optimizer_saved_dict['optimizer_name'] = self.optimizer_name
 
         torch.save(state_dict, filename)
-        if self.optimizer_state_dict:
-            optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
-            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
-            torch.save(optimizer_saved_dict, filename + '.optim')
 
     def load(self, filename):
         self.filename = filename
@@ -202,23 +198,13 @@ class Hypernetwork:
         self.use_dropout = state_dict.get('use_dropout', False)
         print(f"Dropout usage is set to {self.use_dropout}")
 
-        optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
-        self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
-        print(f"Optimizer name is {self.optimizer_name}")
-        if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
-            self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
-        else:
-            self.optimizer_state_dict = None
-        if self.optimizer_state_dict:
-            print("Loaded existing optimizer from checkpoint")
-        else:
-            print("No saved optimizer exists in checkpoint")
-
         for size, sd in state_dict.items():
             if type(size) == int:
                 self.layers[size] = (
-                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
-                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
+                                       self.add_layer_norm, self.use_dropout),
+                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
+                                       self.add_layer_norm, self.use_dropout),
                 )
 
         self.name = state_dict.get('name', self.name)
@@ -233,7 +219,7 @@ def list_hypernetworks(path):
         name = os.path.splitext(os.path.basename(filename))[0]
         # Prevent a hypothetical "None.pt" from being listed.
         if name != "None":
-            res[name + f"({sd_models.model_hash(filename)})"] = filename
+            res[name] = filename
     return res
 
 
@@ -330,7 +316,7 @@ def statistics(data):
         std = 0
     else:
         std = stdev(data)
-    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
+    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std / (len(data) ** 0.5):.3f})"
     recent_data = data[-32:]
     if len(recent_data) < 2:
         std = 0
@@ -340,7 +326,7 @@ def statistics(data):
     return total_information, recent_information
 
 
-def report_statistics(loss_info:dict):
+def report_statistics(loss_info: dict):
     keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
     for key in keys:
         try:
@@ -352,14 +338,18 @@ def report_statistics(loss_info:dict):
             print(e)
 
 
-
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width,
+                       training_height, steps, create_image_every, save_hypernetwork_every, template_file,
+                       preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps,
+                       preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
     from modules import images
 
     save_hypernetwork_every = save_hypernetwork_every or 0
     create_image_every = create_image_every or 0
-    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
+    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps,
+                                            save_hypernetwork_every, create_image_every, log_directory,
+                                            name="hypernetwork")
 
     path = shared.hypernetworks.get(hypernetwork_name, None)
     shared.loaded_hypernetwork = Hypernetwork()
@@ -379,7 +369,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     else:
         hypernetwork_dir = None
 
-    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
     if create_image_every > 0:
         images_dir = os.path.join(log_directory, "images")
         os.makedirs(images_dir, exist_ok=True)
@@ -395,39 +384,34 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
         return hypernetwork, filename
 
     scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
-    
+
     # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
-        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
+        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width,
+                                                                height=training_height,
+                                                                repeats=shared.opts.training_image_repeats_per_epoch,
+                                                                placeholder_token=hypernetwork_name,
+                                                                model=shared.sd_model, device=devices.device,
+                                                                template_file=template_file, include_cond=True,
+                                                                batch_size=batch_size)
 
     if unload:
         shared.sd_model.cond_stage_model.to(devices.cpu)
         shared.sd_model.first_stage_model.to(devices.cpu)
 
     size = len(ds.indexes)
-    loss_dict = defaultdict(lambda : deque(maxlen = 1024))
+    loss_dict = defaultdict(lambda: deque(maxlen=1024))
     losses = torch.zeros((size,))
     previous_mean_losses = [0]
     previous_mean_loss = 0
     print("Mean loss of {} elements".format(size))
-    
+
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True
-    # Here we use optimizer from saved HN, or we can specify as UI option.
-    if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
-        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
-    else:
-        print(f"Optimizer type {optimizer_name} is not defined!")
-        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
-        optimizer_name = 'AdamW'
-    if hypernetwork.optimizer_state_dict:  # This line must be changed if Optimizer type can be different from saved optimizer.
-        try:
-            optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
-        except RuntimeError as e:
-            print("Cannot resume from saved optimizer!")
-            print(e)
+    # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
+    optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
 
     steps_without_grad = 0
 
@@ -441,7 +425,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
         if len(loss_dict) > 0:
             previous_mean_losses = [i[-1] for i in loss_dict.values()]
             previous_mean_loss = mean(previous_mean_losses)
-            
+
         scheduler.apply(optimizer, hypernetwork.step)
         if scheduler.finished:
             break
@@ -460,7 +444,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             losses[hypernetwork.step % losses.shape[0]] = loss.item()
             for entry in entries:
                 loss_dict[entry.filename].append(loss.item())
-                
+
             optimizer.zero_grad()
             weights[0].grad = None
             loss.backward()
@@ -475,9 +459,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
 
         steps_done = hypernetwork.step + 1
 
-        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): 
+        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
             raise RuntimeError("Loss diverged.")
-        
+
         if len(previous_mean_losses) > 1:
             std = stdev(previous_mean_losses)
         else:
@@ -489,11 +473,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             # Before saving, change name to match current checkpoint.
             hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
             last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
-            hypernetwork.optimizer_name = optimizer_name
-            if shared.opts.save_optimizer_state:
-                hypernetwork.optimizer_state_dict = optimizer.state_dict()
             save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
-            hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
+
         textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
             "loss": f"{previous_mean_loss:.7f}",
             "learn_rate": scheduler.learn_rate
@@ -529,7 +510,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             preview_text = p.prompt
 
             processed = processing.process_images(p)
-            image = processed.images[0] if len(processed.images)>0 else None
+            image = processed.images[0] if len(processed.images) > 0 else None
 
             if unload:
                 shared.sd_model.cond_stage_model.to(devices.cpu)
@@ -537,7 +518,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
 
             if image is not None:
                 shared.state.current_image = image
-                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
+                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt,
+                                                                     shared.opts.samples_format, processed.infotexts[0],
+                                                                     p=p, forced_filename=forced_filename,
+                                                                     save_to_dirs=False)
                 last_saved_image += f", prompt: {preview_text}"
 
         shared.state.job_no = hypernetwork.step
@@ -551,15 +535,12 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
 Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
 """
+
     report_statistics(loss_dict)
 
     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
-    hypernetwork.optimizer_name = optimizer_name
-    if shared.opts.save_optimizer_state:
-        hypernetwork.optimizer_state_dict = optimizer.state_dict()
     save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
-    del optimizer
-    hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
+
     return hypernetwork, filename
 
 
@@ -576,4 +557,4 @@ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
         hypernetwork.sd_checkpoint = old_sd_checkpoint
         hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
         hypernetwork.name = old_hypernetwork_name
-        raise
+        raise
\ No newline at end of file

From 0d07cbfa15d34294a4fa22d74359cdd6fe2f799c Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 15:50:54 +0900
Subject: [PATCH 10/27] I blame code autocomplete

---
 modules/hypernetworks/hypernetwork.py | 76 ++++++++++-----------------
 1 file changed, 27 insertions(+), 49 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 674fcedd0..a11e01d6f 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -33,12 +33,9 @@ class HypernetworkModule(torch.nn.Module):
         "tanh": torch.nn.Tanh,
         "sigmoid": torch.nn.Sigmoid,
     }
-    activation_dict.update(
-        {cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if
-         inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
+    activation_dict.update({cls_name.lower(): cls_obj for cls_name, cls_obj in inspect.getmembers(torch.nn.modules.activation) if inspect.isclass(cls_obj) and cls_obj.__module__ == 'torch.nn.modules.activation'})
 
-    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal',
-                 add_layer_norm=False, use_dropout=False):
+    def __init__(self, dim, state_dict=None, layer_structure=None, activation_func=None, weight_init='Normal', add_layer_norm=False, use_dropout=False):
         super().__init__()
 
         assert layer_structure is not None, "layer_structure must not be None"
@@ -49,7 +46,7 @@ class HypernetworkModule(torch.nn.Module):
         for i in range(len(layer_structure) - 1):
 
             # Add a fully-connected layer
-            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i + 1])))
+            linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
 
             # Add an activation func
             if activation_func == "linear" or activation_func is None:
@@ -61,7 +58,7 @@ class HypernetworkModule(torch.nn.Module):
 
             # Add layer normalization
             if add_layer_norm:
-                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i + 1])))
+                linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
 
             # Add dropout expect last layer
             if use_dropout and i < len(layer_structure) - 3:
@@ -130,8 +127,7 @@ class Hypernetwork:
     filename = None
     name = None
 
-    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None,
-                 add_layer_norm=False, use_dropout=False):
+    def __init__(self, name=None, enable_sizes=None, layer_structure=None, activation_func=None, weight_init=None, add_layer_norm=False, use_dropout=False):
         self.filename = None
         self.name = name
         self.layers = {}
@@ -146,10 +142,8 @@ class Hypernetwork:
 
         for size in enable_sizes or []:
             self.layers[size] = (
-                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
-                                   self.add_layer_norm, self.use_dropout),
-                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init,
-                                   self.add_layer_norm, self.use_dropout),
+                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+                HypernetworkModule(size, None, self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
             )
 
     def weights(self):
@@ -196,15 +190,13 @@ class Hypernetwork:
         self.add_layer_norm = state_dict.get('is_layer_norm', False)
         print(f"Layer norm is set to {self.add_layer_norm}")
         self.use_dropout = state_dict.get('use_dropout', False)
-        print(f"Dropout usage is set to {self.use_dropout}")
+        print(f"Dropout usage is set to {self.use_dropout}" )
 
         for size, sd in state_dict.items():
             if type(size) == int:
                 self.layers[size] = (
-                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init,
-                                       self.add_layer_norm, self.use_dropout),
-                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init,
-                                       self.add_layer_norm, self.use_dropout),
+                    HypernetworkModule(size, sd[0], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
+                    HypernetworkModule(size, sd[1], self.layer_structure, self.activation_func, self.weight_init, self.add_layer_norm, self.use_dropout),
                 )
 
         self.name = state_dict.get('name', self.name)
@@ -316,7 +308,7 @@ def statistics(data):
         std = 0
     else:
         std = stdev(data)
-    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std / (len(data) ** 0.5):.3f})"
+    total_information = f"loss:{mean(data):.3f}" + u"\u00B1" + f"({std/ (len(data) ** 0.5):.3f})"
     recent_data = data[-32:]
     if len(recent_data) < 2:
         std = 0
@@ -326,7 +318,7 @@ def statistics(data):
     return total_information, recent_information
 
 
-def report_statistics(loss_info: dict):
+def report_statistics(loss_info:dict):
     keys = sorted(loss_info.keys(), key=lambda x: sum(loss_info[x]) / len(loss_info[x]))
     for key in keys:
         try:
@@ -338,18 +330,14 @@ def report_statistics(loss_info: dict):
             print(e)
 
 
-def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width,
-                       training_height, steps, create_image_every, save_hypernetwork_every, template_file,
-                       preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps,
-                       preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
+
+def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height):
     # images allows training previews to have infotext. Importing it at the top causes a circular import problem.
     from modules import images
 
     save_hypernetwork_every = save_hypernetwork_every or 0
     create_image_every = create_image_every or 0
-    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps,
-                                            save_hypernetwork_every, create_image_every, log_directory,
-                                            name="hypernetwork")
+    textual_inversion.validate_train_inputs(hypernetwork_name, learn_rate, batch_size, data_root, template_file, steps, save_hypernetwork_every, create_image_every, log_directory, name="hypernetwork")
 
     path = shared.hypernetworks.get(hypernetwork_name, None)
     shared.loaded_hypernetwork = Hypernetwork()
@@ -384,29 +372,23 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
         return hypernetwork, filename
 
     scheduler = LearnRateScheduler(learn_rate, steps, ititial_step)
-
+    
     # dataset loading may take a while, so input validations and early returns should be done before this
     shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
     with torch.autocast("cuda"):
-        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width,
-                                                                height=training_height,
-                                                                repeats=shared.opts.training_image_repeats_per_epoch,
-                                                                placeholder_token=hypernetwork_name,
-                                                                model=shared.sd_model, device=devices.device,
-                                                                template_file=template_file, include_cond=True,
-                                                                batch_size=batch_size)
+        ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size)
 
     if unload:
         shared.sd_model.cond_stage_model.to(devices.cpu)
         shared.sd_model.first_stage_model.to(devices.cpu)
 
     size = len(ds.indexes)
-    loss_dict = defaultdict(lambda: deque(maxlen=1024))
+    loss_dict = defaultdict(lambda : deque(maxlen = 1024))
     losses = torch.zeros((size,))
     previous_mean_losses = [0]
     previous_mean_loss = 0
     print("Mean loss of {} elements".format(size))
-
+    
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True
@@ -425,7 +407,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
         if len(loss_dict) > 0:
             previous_mean_losses = [i[-1] for i in loss_dict.values()]
             previous_mean_loss = mean(previous_mean_losses)
-
+            
         scheduler.apply(optimizer, hypernetwork.step)
         if scheduler.finished:
             break
@@ -444,7 +426,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             losses[hypernetwork.step % losses.shape[0]] = loss.item()
             for entry in entries:
                 loss_dict[entry.filename].append(loss.item())
-
+                
             optimizer.zero_grad()
             weights[0].grad = None
             loss.backward()
@@ -459,9 +441,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
 
         steps_done = hypernetwork.step + 1
 
-        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]):
+        if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): 
             raise RuntimeError("Loss diverged.")
-
+        
         if len(previous_mean_losses) > 1:
             std = stdev(previous_mean_losses)
         else:
@@ -510,7 +492,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             preview_text = p.prompt
 
             processed = processing.process_images(p)
-            image = processed.images[0] if len(processed.images) > 0 else None
+            image = processed.images[0] if len(processed.images)>0 else None
 
             if unload:
                 shared.sd_model.cond_stage_model.to(devices.cpu)
@@ -518,10 +500,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
 
             if image is not None:
                 shared.state.current_image = image
-                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt,
-                                                                     shared.opts.samples_format, processed.infotexts[0],
-                                                                     p=p, forced_filename=forced_filename,
-                                                                     save_to_dirs=False)
+                last_saved_image, last_text_info = images.save_image(image, images_dir, "", p.seed, p.prompt, shared.opts.samples_format, processed.infotexts[0], p=p, forced_filename=forced_filename, save_to_dirs=False)
                 last_saved_image += f", prompt: {preview_text}"
 
         shared.state.job_no = hypernetwork.step
@@ -535,7 +514,7 @@ Last saved hypernetwork: {html.escape(last_saved_file)}<br/>
 Last saved image: {html.escape(last_saved_image)}<br/>
 </p>
 """
-
+        
     report_statistics(loss_dict)
 
     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
@@ -543,7 +522,6 @@ Last saved image: {html.escape(last_saved_image)}<br/>
 
     return hypernetwork, filename
 
-
 def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
     old_hypernetwork_name = hypernetwork.name
     old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
@@ -557,4 +535,4 @@ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
         hypernetwork.sd_checkpoint = old_sd_checkpoint
         hypernetwork.sd_checkpoint_name = old_sd_checkpoint_name
         hypernetwork.name = old_hypernetwork_name
-        raise
\ No newline at end of file
+        raise

From 283249d2390f0f3a1c8a55d5d9aa551e3e9b2f9c Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 15:57:17 +0900
Subject: [PATCH 11/27] apply

---
 modules/hypernetworks/hypernetwork.py | 54 ++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 5 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 6e1a10cf3..de8688a96 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -22,6 +22,8 @@ from collections import defaultdict, deque
 from statistics import stdev, mean
 
 
+optimizer_dict = {optim_name : cls_obj for optim_name, cls_obj in inspect.getmembers(torch.optim, inspect.isclass) if optim_name != "Optimizer"}
+
 class HypernetworkModule(torch.nn.Module):
     multiplier = 1.0
     activation_dict = {
@@ -142,6 +144,8 @@ class Hypernetwork:
         self.use_dropout = use_dropout
         self.activate_output = activate_output
         self.last_layer_dropout = kwargs['last_layer_dropout'] if 'last_layer_dropout' in kwargs else True
+        self.optimizer_name = None
+        self.optimizer_state_dict = None
 
         for size in enable_sizes or []:
             self.layers[size] = (
@@ -163,6 +167,7 @@ class Hypernetwork:
 
     def save(self, filename):
         state_dict = {}
+        optimizer_saved_dict = {}
 
         for k, v in self.layers.items():
             state_dict[k] = (v[0].state_dict(), v[1].state_dict())
@@ -178,8 +183,15 @@ class Hypernetwork:
         state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
         state_dict['activate_output'] = self.activate_output
         state_dict['last_layer_dropout'] = self.last_layer_dropout
-        
+
+        if self.optimizer_name is not None:
+            optimizer_saved_dict['optimizer_name'] = self.optimizer_name
+
         torch.save(state_dict, filename)
+        if self.optimizer_state_dict:
+            optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
+            optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
+            torch.save(optimizer_saved_dict, filename + '.optim')
 
     def load(self, filename):
         self.filename = filename
@@ -202,6 +214,18 @@ class Hypernetwork:
         print(f"Activate last layer is set to {self.activate_output}")
         self.last_layer_dropout = state_dict.get('last_layer_dropout', False)
 
+        optimizer_saved_dict = torch.load(self.filename + '.optim', map_location = 'cpu') if os.path.exists(self.filename + '.optim') else {}
+        self.optimizer_name = optimizer_saved_dict.get('optimizer_name', 'AdamW')
+        print(f"Optimizer name is {self.optimizer_name}")
+        if sd_models.model_hash(filename) == optimizer_saved_dict.get('hash', None):
+            self.optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None)
+        else:
+            self.optimizer_state_dict = None
+        if self.optimizer_state_dict:
+            print("Loaded existing optimizer from checkpoint")
+        else:
+            print("No saved optimizer exists in checkpoint")
+
         for size, sd in state_dict.items():
             if type(size) == int:
                 self.layers[size] = (
@@ -223,7 +247,7 @@ def list_hypernetworks(path):
         name = os.path.splitext(os.path.basename(filename))[0]
         # Prevent a hypothetical "None.pt" from being listed.
         if name != "None":
-            res[name] = filename
+            res[name + f"({sd_models.model_hash(filename)})"] = filename
     return res
 
 
@@ -369,6 +393,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     else:
         hypernetwork_dir = None
 
+    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
     if create_image_every > 0:
         images_dir = os.path.join(log_directory, "images")
         os.makedirs(images_dir, exist_ok=True)
@@ -404,8 +429,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True
-    # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc...
-    optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate)
+    # Here we use optimizer from saved HN, or we can specify as UI option.
+    if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+        optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+    else:
+        print(f"Optimizer type {optimizer_name} is not defined!")
+        optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
+        optimizer_name = 'AdamW'
+    if hypernetwork.optimizer_state_dict:  # This line must be changed if Optimizer type can be different from saved optimizer.
+        try:
+            optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
+        except RuntimeError as e:
+            print("Cannot resume from saved optimizer!")
+            print(e)
 
     steps_without_grad = 0
 
@@ -467,7 +503,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
             # Before saving, change name to match current checkpoint.
             hypernetwork_name_every = f'{hypernetwork_name}-{steps_done}'
             last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name_every}.pt')
+            hypernetwork.optimizer_name = optimizer_name
+            if shared.opts.save_optimizer_state:
+                hypernetwork.optimizer_state_dict = optimizer.state_dict()
             save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, last_saved_file)
+            hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
 
         textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), {
             "loss": f"{previous_mean_loss:.7f}",
@@ -530,8 +570,12 @@ Last saved image: {html.escape(last_saved_image)}<br/>
     report_statistics(loss_dict)
 
     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+    hypernetwork.optimizer_name = optimizer_name
+    if shared.opts.save_optimizer_state:
+        hypernetwork.optimizer_state_dict = optimizer.state_dict()
     save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename)
-
+    del optimizer
+    hypernetwork.optimizer_state_dict = None  # dereference it after saving, to save memory.
     return hypernetwork, filename
 
 def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):

From f5d394214d6ee74a682d0a1016bcbebc4b43c13a Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 16:04:03 +0900
Subject: [PATCH 12/27] split before declaring file name

---
 modules/hypernetworks/hypernetwork.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index de8688a96..9b6a3e627 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -382,6 +382,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     shared.state.textinfo = "Initializing hypernetwork training..."
     shared.state.job_count = steps
 
+    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
     filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
 
     log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
@@ -393,7 +394,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     else:
         hypernetwork_dir = None
 
-    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
     if create_image_every > 0:
         images_dir = os.path.join(log_directory, "images")
         os.makedirs(images_dir, exist_ok=True)
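
Patches 08 and 11 list hypernetworks under "name(hash)" keys, so the training
entry point strips the suffix back off; moving the rsplit above the first use of
hypernetwork_name keeps the hash out of the output filename. The strip itself:

    hypernetwork_name = "my_network(abcd1234)"
    # rsplit from the right, so a '(' inside a user-chosen name survives
    hypernetwork_name = hypernetwork_name.rsplit('(', 1)[0]
    assert hypernetwork_name == "my_network"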

From 1ca0bcd3a7003dd2c1324de7d97fd2a6fc5ddc53 Mon Sep 17 00:00:00 2001
From: aria1th <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 16:09:19 +0900
Subject: [PATCH 13/27] only save if option is enabled

---
 modules/hypernetworks/hypernetwork.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 9b6a3e627..b1f308e2d 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -188,7 +188,7 @@ class Hypernetwork:
             optimizer_saved_dict['optimizer_name'] = self.optimizer_name
 
         torch.save(state_dict, filename)
-        if self.optimizer_state_dict:
+        if shared.opts.save_optimizer_state and self.optimizer_state_dict:
             optimizer_saved_dict['hash'] = sd_models.model_hash(filename)
             optimizer_saved_dict['optimizer_state_dict'] = self.optimizer_state_dict
             torch.save(optimizer_saved_dict, filename + '.optim')

From 7278897982bfb640ee95f144c97ed25fb3f77ea3 Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Fri, 4 Nov 2022 17:12:28 +0900
Subject: [PATCH 14/27] Update shared.py

---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index 4d6e1c8b3..6e7a02e06 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -309,7 +309,7 @@ options_templates.update(options_section(('system', "System"), {
 
 options_templates.update(options_section(('training', "Training"), {
     "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."),
-    "save_optimizer_state": OptionInfo(False, "Saves Optimizer state with checkpoints. This will cause file size to increase VERY much."),
+    "save_optimizer_state": OptionInfo(False, "Saves optimizer state as a separate *.optim file. Training can be resumed with the HN itself and its matching *.optim file."),
     "dataset_filename_word_regex": OptionInfo("", "Filename word regex"),
     "dataset_filename_join_string": OptionInfo(" ", "Filename join string"),
     "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}),

From fd62727893f9face287b0a9620251afaa38a627d Mon Sep 17 00:00:00 2001
From: Isaac Poulton <flagredomega@gmail.com>
Date: Fri, 4 Nov 2022 18:34:35 +0700
Subject: [PATCH 15/27] Sort hypernetworks

---
 modules/hypernetworks/hypernetwork.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 6e1a10cf3..f1f04a70c 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -224,7 +224,7 @@ def list_hypernetworks(path):
         # Prevent a hypothetical "None.pt" from being listed.
         if name != "None":
             res[name] = filename
-    return res
+    return dict(sorted(res.items()))
 
 
 def load_hypernetwork(filename):

From f316280ad3634a2343b086a6de0bfcd473e18599 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 16:48:40 +0300
Subject: [PATCH 16/27] fix the error that prevents from setting some options

---
 modules/shared.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index a9e28b9c4..962115f61 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -406,7 +406,8 @@ class Options:
             if key in self.data or key in self.data_labels:
                 assert not cmd_opts.freeze_settings, "changing settings is disabled"
 
-                comp_args = opts.data_labels[key].component_args
+                info = opts.data_labels.get(key, None)
+                comp_args = info.component_args if info else None
                 if isinstance(comp_args, dict) and comp_args.get('visible', True) is False:
                     raise RuntimeError(f"not possible to set {key} because it is restricted")
 
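
The failure mode being fixed: a key can be present in self.data (for instance an option persisted by a since-removed extension) while absent from data_labels, so indexing data_labels[key] raised KeyError and aborted the whole settings write. A toy illustration (names made up):

    data_labels = {"show_progressbar": ...}   # registered options
    key = "legacy_option"                     # persisted in config, no longer registered
    # old: data_labels[key].component_args  -> KeyError
    info = data_labels.get(key, None)
    comp_args = info.component_args if info else None   # degrades to None instead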

From 116bcf730ade8d3ac5d76d04c5887b6bba000970 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 16:48:46 +0300
Subject: [PATCH 17/27] disable setting options via API until it is fixed by
 the author

---
 modules/api/api.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modules/api/api.py b/modules/api/api.py
index a49f37551..8a7ab2f52 100644
--- a/modules/api/api.py
+++ b/modules/api/api.py
@@ -218,6 +218,10 @@ class Api:
         return options
         
     def set_config(self, req: OptionsModel):
+        # currently req has every option field populated even if you send a partial dict like { "send_seed": false },
+        # which means it would overwrite all other options with their default values.
+        raise RuntimeError('Setting options via API is not supported')
+
         reqDict = vars(req)
         for o in reqDict:
             setattr(shared.opts, o, reqDict[o])
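
The pitfall in the comment is the standard partial-update problem: pydantic materializes every field of the model, so a one-key request arrives with every other option set to its default. A hedged sketch of the usual pydantic v1 remedy, should the endpoint be re-enabled (the model below is a stand-in for the generated OptionsModel):

    from pydantic import BaseModel

    class OptionsModel(BaseModel):     # stand-in; the real model is generated from opts
        send_seed: bool = True
        grid_save: bool = True

    req = OptionsModel.parse_obj({"send_seed": False})
    vars(req)                      # {'send_seed': False, 'grid_save': True} -- default leaks in
    req.dict(exclude_unset=True)   # {'send_seed': False} -- only what the client sent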

From 08feb4c364e8b2aed929fd7d22dfa21a93d78b2c Mon Sep 17 00:00:00 2001
From: Isaac Poulton <flagredomega@gmail.com>
Date: Fri, 4 Nov 2022 20:53:11 +0700
Subject: [PATCH 18/27] Sort straight out of the glob

---
 modules/hypernetworks/hypernetwork.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index f1f04a70c..a441ab106 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -219,12 +219,12 @@ class Hypernetwork:
 
 def list_hypernetworks(path):
     res = {}
-    for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
+    for filename in sorted(glob.iglob(os.path.join(path, '**/*.pt'), recursive=True)):
         name = os.path.splitext(os.path.basename(filename))[0]
         # Prevent a hypothetical "None.pt" from being listed.
         if name != "None":
             res[name] = filename
-    return dict(sorted(res.items()))
+    return res
 
 
 def load_hypernetwork(filename):
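
Note the subtle behavioral difference from the previous commit: sorting the glob orders entries by full relative path, whereas dict(sorted(res.items())) ordered them by bare name, and the two disagree once subdirectories are involved:

    import os
    paths = sorted(["hn/b.pt", "hn/sub/a.pt"])   # glob order: by path
    print([os.path.splitext(os.path.basename(p))[0] for p in paths])
    # ['b', 'a'] -- the earlier dict(sorted(...)) approach would yield ['a', 'b']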

From 0d7e01d9950e013784c4b77c05aa7583ea69edc8 Mon Sep 17 00:00:00 2001
From: innovaciones <sonygarcia99@gmail.com>
Date: Fri, 4 Nov 2022 12:14:32 -0600
Subject: [PATCH 19/27] Open extensions links in new tab

Fixed for "Available" tab
---
 modules/ui_extensions.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py
index a81de9a7c..8e0d41d57 100644
--- a/modules/ui_extensions.py
+++ b/modules/ui_extensions.py
@@ -188,7 +188,7 @@ def refresh_available_extensions_from_data():
 
         code += f"""
             <tr>
-                <td><a href="{html.escape(url)}">{html.escape(name)}</a></td>
+                <td><a href="{html.escape(url)}" target="_blank">{html.escape(name)}</a></td>
                 <td>{html.escape(description)}</td>
                 <td>{install_code}</td>
             </tr>

From b8435e632f7ba0da12a2c8e9c788dda519279d24 Mon Sep 17 00:00:00 2001
From: evshiron <evshiron@gmail.com>
Date: Sat, 5 Nov 2022 02:36:47 +0800
Subject: [PATCH 20/27] add --cors-allow-origins cmd opt

---
 modules/shared.py | 7 ++++---
 webui.py          | 9 +++++++++
 2 files changed, 13 insertions(+), 3 deletions(-)

diff --git a/modules/shared.py b/modules/shared.py
index a9e28b9c4..e83cbcdff 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -86,6 +86,7 @@ parser.add_argument("--nowebui", action='store_true', help="use api=True to laun
 parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
 parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
 parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
+parser.add_argument("--cors-allow-origins", type=str, help="Allowed CORS origins", default=None)
 
 cmd_opts = parser.parse_args()
 restricted_opts = {
@@ -147,9 +148,9 @@ class State:
         self.interrupted = True
 
     def nextjob(self):
-        if opts.show_progress_every_n_steps == -1: 
+        if opts.show_progress_every_n_steps == -1:
             self.do_set_current_image()
-            
+
         self.job_no += 1
         self.sampling_step = 0
         self.current_image_sampling_step = 0
@@ -198,7 +199,7 @@ class State:
             return
         if self.current_latent is None:
             return
-            
+
         if opts.show_progress_grid:
             self.current_image = sd_samplers.samples_to_image_grid(self.current_latent)
         else:
diff --git a/webui.py b/webui.py
index 81df09dd2..3788af0ba 100644
--- a/webui.py
+++ b/webui.py
@@ -5,6 +5,7 @@ import importlib
 import signal
 import threading
 from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
 from fastapi.middleware.gzip import GZipMiddleware
 
 from modules.paths import script_path
@@ -93,6 +94,11 @@ def initialize():
     signal.signal(signal.SIGINT, sigint_handler)
 
 
+def setup_cors(app):
+    if cmd_opts.cors_allow_origins:
+        app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'])
+
+
 def create_api(app):
     from modules.api.api import Api
     api = Api(app, queue_lock)
@@ -114,6 +120,7 @@ def api_only():
     initialize()
 
     app = FastAPI()
+    setup_cors(app)
     app.add_middleware(GZipMiddleware, minimum_size=1000)
     api = create_api(app)
 
@@ -147,6 +154,8 @@ def webui():
         # running its code. We disable this here. Suggested by RyotaK.
         app.user_middleware = [x for x in app.user_middleware if x.cls.__name__ != 'CORSMiddleware']
 
+        setup_cors(app)
+
         app.add_middleware(GZipMiddleware, minimum_size=1000)
 
         if launch_api:
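
Usage takes a comma-separated list; the string is handed to split(',') verbatim, so origins must not carry spaces after the commas (origins below are illustrative):

    python webui.py --api --cors-allow-origins=http://localhost:3000,https://example.com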

From 467d8b967b5d1b1984ab113bec3fff217736e7ac Mon Sep 17 00:00:00 2001
From: AngelBottomless <35677394+aria1th@users.noreply.github.com>
Date: Sat, 5 Nov 2022 04:24:42 +0900
Subject: [PATCH 21/27] Fix errors from commit f2b697 with --hide-ui-dir-config

https://github.com/AUTOMATIC1111/stable-diffusion-webui/commit/f2b69709eaff88fc3a2bd49585556ec0883bf5ea
---
 modules/ui.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/modules/ui.py b/modules/ui.py
index 4c2829af9..76ca9b071 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1446,17 +1446,19 @@ def create_ui(wrap_gradio_gpu_call):
                 continue
 
             oldval = opts.data.get(key, None)
-
-            setattr(opts, key, value)
-
+            try:
+                setattr(opts, key, value)
+            except RuntimeError:
+                continue
             if oldval != value:
                 if opts.data_labels[key].onchange is not None:
                     opts.data_labels[key].onchange()
 
                 changed += 1
-
-        opts.save(shared.config_filename)
-
+        try:
+            opts.save(shared.config_filename)
+        except RuntimeError:
+            return opts.dumpjson(), f'{changed} settings changed without save.'
         return opts.dumpjson(), f'{changed} settings changed.'
 
     def run_settings_single(value, key):
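
The RuntimeError being swallowed here is the one Options raises for restricted keys (the guard added two patches earlier), so with --hide-ui-dir-config a settings write now skips restricted keys and reports a partial result instead of failing outright. A condensed view of the interplay:

    # inside Options.set (see patch 16):
    #   raise RuntimeError(f"not possible to set {key} because it is restricted")
    # inside the UI handler (this patch):
    try:
        setattr(opts, key, value)    # may raise for restricted keys
    except RuntimeError:
        pass                         # skip this key, keep processing the rest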

From 30b1bcc64e67ad50c5d3af3a6fe1bd1e9553f34e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 4 Nov 2022 22:56:18 +0300
Subject: [PATCH 22/27] fix upscale loop erroneously applied multiple times

---
 modules/upscaler.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/modules/upscaler.py b/modules/upscaler.py
index 83fde7ca9..c4e6e6bd6 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -57,10 +57,18 @@ class Upscaler:
         self.scale = scale
         dest_w = img.width * scale
         dest_h = img.height * scale
+
         for i in range(3):
-            if img.width > dest_w and img.height > dest_h:
-                break
+            shape = (img.width, img.height)
+
             img = self.do_upscale(img, selected_model)
+
+            if shape == (img.width, img.height):
+                break
+
+            if img.width >= dest_w and img.height >= dest_h:
+                break
+
         if img.width != dest_w or img.height != dest_h:
             img = img.resize((int(dest_w), int(dest_h)), resample=LANCZOS)
 
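
A worked trace of the new loop, assuming a fixed 2x upscaler and scale=4 on a 512x512 input:

    # dest_w, dest_h = 2048, 2048
    # pass 1: 512 -> 1024    shape changed and dest not reached, so loop again
    # pass 2: 1024 -> 2048   img.width >= dest_w and img.height >= dest_h -> break
    # pass 3: never runs
    # a no-op upscaler returns the image unchanged, so shape == (img.width, img.height)
    # breaks after one pass instead of applying do_upscale three times
    # the final resize only fires if the result still differs from dest exactly

Under the old strict comparison, 2048 > 2048 is false, so the loop applied the upscaler a third time (to 4096) before the final downsize, which is the erroneous multiple application in the subject line.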

From ebce0c57c78a3f22178e3a38938d19ec0dfb703d Mon Sep 17 00:00:00 2001
From: Billy Cao <aliencaocao@gmail.com>
Date: Sat, 5 Nov 2022 11:38:24 +0800
Subject: [PATCH 23/27] Use typing.Optional instead of | to add support for
 Python 3.9 and below.

---
 modules/api/models.py | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

diff --git a/modules/api/models.py b/modules/api/models.py
index 2ae75f435..a44c5ddd0 100644
--- a/modules/api/models.py
+++ b/modules/api/models.py
@@ -1,6 +1,6 @@
 import inspect
 from pydantic import BaseModel, Field, create_model
-from typing import Any, Optional, Union
+from typing import Any, Optional
 from typing_extensions import Literal
 from inflection import underscore
 from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img
@@ -185,22 +185,22 @@ _options = vars(parser)['_option_string_actions']
 for key in _options:
     if(_options[key].dest != 'help'):
         flag = _options[key]
-        _type = str 
-        if(_options[key].default != None): _type = type(_options[key].default) 
+        _type = str
+        if _options[key].default is not None: _type = type(_options[key].default)
         flags.update({flag.dest: (_type,Field(default=flag.default, description=flag.help))})
 
 FlagsModel = create_model("Flags", **flags)
 
 class SamplerItem(BaseModel):
     name: str = Field(title="Name")
-    aliases: list[str]  = Field(title="Aliases")
+    aliases: list[str] = Field(title="Aliases")
     options: dict[str, str] = Field(title="Options")
 
 class UpscalerItem(BaseModel):
     name: str = Field(title="Name")
-    model_name: str | None = Field(title="Model Name")
-    model_path: str | None = Field(title="Path")
-    model_url: str | None = Field(title="URL")
+    model_name: Optional[str] = Field(title="Model Name")
+    model_path: Optional[str] = Field(title="Path")
+    model_url: Optional[str] = Field(title="URL")
 
 class SDModelItem(BaseModel):
     title: str = Field(title="Title")
@@ -211,21 +211,21 @@ class SDModelItem(BaseModel):
 
 class HypernetworkItem(BaseModel):
     name: str = Field(title="Name")
-    path: str | None = Field(title="Path")
+    path: Optional[str] = Field(title="Path")
 
 class FaceRestorerItem(BaseModel):
     name: str = Field(title="Name")
-    cmd_dir: str | None = Field(title="Path")
+    cmd_dir: Optional[str] = Field(title="Path")
 
 class RealesrganItem(BaseModel):
     name: str = Field(title="Name")
-    path: str | None = Field(title="Path")
-    scale: int | None = Field(title="Scale")
+    path: Optional[str] = Field(title="Path")
+    scale: Optional[int] = Field(title="Scale")
 
 class PromptStyleItem(BaseModel):
     name: str = Field(title="Name")
-    prompt: str | None = Field(title="Prompt")
-    negative_prompt: str | None = Field(title="Negative Prompt")
+    prompt: Optional[str] = Field(title="Prompt")
+    negative_prompt: Optional[str] = Field(title="Negative Prompt")
 
 class ArtistItem(BaseModel):
     name: str = Field(title="Name")
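
The X | Y union spelling is PEP 604 syntax and only evaluates on Python 3.10+; because pydantic evaluates field annotations at class-creation time, the old spelling crashed at import on 3.9 and below:

    from typing import Optional

    # str | None   -> on Python <= 3.9 this raises at runtime:
    #                 TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
    ModelName = Optional[str]   # equivalent union, valid back to 3.7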

From e9a5562b9b27a1a4f9c282637b111cefd9727a41 Mon Sep 17 00:00:00 2001
From: papuSpartan <mcgpapu@gmail.com>
Date: Sat, 5 Nov 2022 04:06:51 -0500
Subject: [PATCH 24/27] add support for tls (gradio tls options)

---
 modules/shared.py |  3 +++
 webui.py          | 22 ++++++++++++++++++++--
 2 files changed, 23 insertions(+), 2 deletions(-)

diff --git a/modules/shared.py b/modules/shared.py
index 962115f61..7a20c3afa 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -86,6 +86,9 @@ parser.add_argument("--nowebui", action='store_true', help="use api=True to laun
 parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI")
 parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None)
 parser.add_argument("--administrator", action='store_true', help="Administrator rights", default=False)
+parser.add_argument("--tls-keyfile", type=str, help="Partially enables TLS, requires --tls-certfile to fully function", default=None)
+parser.add_argument("--tls-certfile", type=str, help="Partially enables TLS, requires --tls-keyfile to fully function", default=None)
+parser.add_argument("--server-name", type=str, help="Sets hostname of server", default=None)
 
 cmd_opts = parser.parse_args()
 restricted_opts = {
diff --git a/webui.py b/webui.py
index 81df09dd2..d366f4ca2 100644
--- a/webui.py
+++ b/webui.py
@@ -34,7 +34,7 @@ from modules.shared import cmd_opts
 import modules.hypernetworks.hypernetwork
 
 queue_lock = threading.Lock()
-
+server_name = "0.0.0.0" if cmd_opts.listen else cmd_opts.server_name
 
 def wrap_queued_call(func):
     def f(*args, **kwargs):
@@ -85,6 +85,22 @@ def initialize():
     shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetworks.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
     shared.opts.onchange("sd_hypernetwork_strength", modules.hypernetworks.hypernetwork.apply_strength)
 
+    if cmd_opts.tls_keyfile is not None and cmd_opts.tls_certfile is not None:
+
+        try:
+            if not os.path.exists(cmd_opts.tls_keyfile):
+                print("Invalid path to TLS keyfile given")
+            if not os.path.exists(cmd_opts.tls_certfile):
+                print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
+        except TypeError:
+            cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
+            print(f"path: '{cmd_opts.tls_keyfile}' {type(cmd_opts.tls_keyfile)}")
+            print(f"path: '{cmd_opts.tls_certfile}' {type(cmd_opts.tls_certfile)}")
+            print("TLS setup invalid, running webui without TLS")
+        else:
+            print("Running with TLS")
+
+
     # make the program just exit at ctrl+c without waiting for anything
     def sigint_handler(sig, frame):
         print(f'Interrupted with signal {sig} in {frame}')
@@ -131,8 +147,10 @@ def webui():
 
         app, local_url, share_url = demo.launch(
             share=cmd_opts.share,
-            server_name="0.0.0.0" if cmd_opts.listen else None,
+            server_name=server_name,
             server_port=cmd_opts.port,
+            ssl_keyfile=cmd_opts.tls_keyfile,
+            ssl_certfile=cmd_opts.tls_certfile,
             debug=cmd_opts.gradio_debug,
             auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
             inbrowser=cmd_opts.autolaunch,
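
A sample launch with the new flags (file names illustrative); gradio forwards them to its HTTPS server as ssl_keyfile/ssl_certfile, and --listen still wins over --server-name since it forces 0.0.0.0:

    python webui.py --tls-keyfile server.key --tls-certfile server.crt --server-name myhost.local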

From a02bad570ef7718436369bb4e4aa5b8e0f1f5689 Mon Sep 17 00:00:00 2001
From: papuSpartan <mcgpapu@gmail.com>
Date: Sat, 5 Nov 2022 04:14:21 -0500
Subject: [PATCH 25/27] rm dbg

---
 webui.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/webui.py b/webui.py
index d366f4ca2..222dbeee7 100644
--- a/webui.py
+++ b/webui.py
@@ -94,8 +94,6 @@ def initialize():
                 print(f"Invalid path to TLS certfile: '{cmd_opts.tls_certfile}'")
         except TypeError:
             cmd_opts.tls_keyfile = cmd_opts.tls_certfile = None
-            print(f"path: '{cmd_opts.tls_keyfile}' {type(cmd_opts.tls_keyfile)}")
-            print(f"path: '{cmd_opts.tls_certfile}' {type(cmd_opts.tls_certfile)}")
             print("TLS setup invalid, running webui without TLS")
         else:
             print("Running with TLS")

From 03b08c4a6b0609f24ec789d40100529b92ef0612 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 5 Nov 2022 15:04:48 +0300
Subject: [PATCH 26/27] do not die when an extension's repo has no remote

---
 modules/extensions.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/modules/extensions.py b/modules/extensions.py
index 897af96e1..8e0977fdf 100644
--- a/modules/extensions.py
+++ b/modules/extensions.py
@@ -34,8 +34,11 @@ class Extension:
         if repo is None or repo.bare:
             self.remote = None
         else:
-            self.remote = next(repo.remote().urls, None)
-            self.status = 'unknown'
+            try:
+                self.remote = next(repo.remote().urls, None)
+                self.status = 'unknown'
+            except Exception:
+                self.remote = None
 
     def list_files(self, subdir, extension):
         from modules import scripts
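
For context, GitPython's repo.remote() defaults to the remote named "origin" and raises ValueError when no such remote exists, e.g. an extension directory that was git init-ed locally and never pushed. A minimal sketch of the failure the broad except now absorbs (path illustrative):

    import git

    repo = git.Repo("extensions/local-only-extension")
    try:
        remote = next(repo.remote().urls, None)   # ValueError: no remote "origin"
    except ValueError:
        remote = None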

From 62e3d71aa778928d63cab81d9d8cde33e55bebb3 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 5 Nov 2022 17:09:42 +0300
Subject: [PATCH 27/27] rework the code to not use the walrus operator because
 colab's 3.7 does not support it

---
 modules/hypernetworks/hypernetwork.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py
index 5ceed6ee3..7f182712b 100644
--- a/modules/hypernetworks/hypernetwork.py
+++ b/modules/hypernetworks/hypernetwork.py
@@ -429,13 +429,16 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log
     weights = hypernetwork.weights()
     for weight in weights:
         weight.requires_grad = True
+
     # Here we use the optimizer from the saved HN, or it can be specified as a UI option.
-    if (optimizer_name := hypernetwork.optimizer_name) in optimizer_dict:
+    if hypernetwork.optimizer_name in optimizer_dict:
         optimizer = optimizer_dict[hypernetwork.optimizer_name](params=weights, lr=scheduler.learn_rate)
+        optimizer_name = hypernetwork.optimizer_name
     else:
-        print(f"Optimizer type {optimizer_name} is not defined!")
+        print(f"Optimizer type {hypernetwork.optimizer_name} is not defined!")
         optimizer = torch.optim.AdamW(params=weights, lr=scheduler.learn_rate)
         optimizer_name = 'AdamW'
+
     if hypernetwork.optimizer_state_dict:  # This line must be changed if the optimizer type can differ from the saved one.
         try:
             optimizer.load_state_dict(hypernetwork.optimizer_state_dict)
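
For reference, the assignment expression removed here is PEP 572 syntax, introduced in Python 3.8, so the file failed to even parse on Colab's 3.7. A minimal illustration of the equivalence (sample values made up):

    choices = {"AdamW": object()}
    saved = "AdamW"
    # 3.8+ only: bind and test in one expression (SyntaxError on 3.7)
    if (name := saved) in choices:
        print(name)
    # 3.7-compatible rework: test first, bind inside the branch
    if saved in choices:
        name = saved
        print(name)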