Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
Remove activation from final layer of HNs
commit 2f4c91894d
parent 3e15f8e0f5
@@ -41,8 +41,8 @@ class HypernetworkModule(torch.nn.Module):
             # Add a fully-connected layer
             linears.append(torch.nn.Linear(int(dim * layer_structure[i]), int(dim * layer_structure[i+1])))
 
-            # Add an activation func
-            if activation_func == "linear" or activation_func is None:
+            # Add an activation func except last layer
+            if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3:
                 pass
             elif activation_func in self.activation_dict:
                 linears.append(self.activation_dict[activation_func]())
@@ -53,7 +53,7 @@ class HypernetworkModule(torch.nn.Module):
             if add_layer_norm:
                 linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))
 
-            # Add dropout expect last layer
+            # Add dropout except last layer
             if use_dropout and i < len(layer_structure) - 3:
                 linears.append(torch.nn.Dropout(p=0.3))
 
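For context, here is a minimal, self-contained sketch of the layer-construction loop as it behaves after this patch. The wrapper name build_hn_layers, the trimmed activation table, and the example call at the end are illustrative assumptions and not part of the commit; the loop body mirrors the new lines in the diff above.

import torch

# Sketch only: build_hn_layers and this reduced activation table are assumed
# names for illustration; the loop body follows the patched code above.
def build_hn_layers(dim, layer_structure, activation_func=None,
                    add_layer_norm=False, use_dropout=False):
    activation_dict = {
        "relu": torch.nn.ReLU,
        "leakyrelu": torch.nn.LeakyReLU,
        "elu": torch.nn.ELU,
    }
    linears = []
    for i in range(len(layer_structure) - 1):
        # Add a fully-connected layer
        linears.append(torch.nn.Linear(int(dim * layer_structure[i]),
                                       int(dim * layer_structure[i+1])))

        # Add an activation func except last layer
        if activation_func == "linear" or activation_func is None or i >= len(layer_structure) - 3:
            pass
        elif activation_func in activation_dict:
            linears.append(activation_dict[activation_func]())

        # Optional layer normalization (unchanged by this commit)
        if add_layer_norm:
            linears.append(torch.nn.LayerNorm(int(dim * layer_structure[i+1])))

        # Add dropout except last layer
        if use_dropout and i < len(layer_structure) - 3:
            linears.append(torch.nn.Dropout(p=0.3))

    return torch.nn.Sequential(*linears)

# With layer_structure [1, 2, 2, 1], only the first block gets ReLU/Dropout;
# the trailing Linear layers are left bare, which is the point of the change.
print(build_hn_layers(320, [1, 2, 2, 1], activation_func="relu", use_dropout=True))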
|