Commit

Merge branch 'AUTOMATIC1111:dev' into mac-builds-experimental
brkirch committed Aug 14, 2023
2 parents e1d59fd + 45be87a commit d219d97
Showing 91 changed files with 4,459 additions and 2,928 deletions.
4 changes: 4 additions & 0 deletions .eslintrc.js
@@ -87,5 +87,9 @@ module.exports = {
modalNextImage: "readonly",
// token-counters.js
setupTokenCounters: "readonly",
// localStorage.js
localSet: "readonly",
localGet: "readonly",
localRemove: "readonly"
}
};
4 changes: 2 additions & 2 deletions README.md
@@ -115,15 +115,15 @@ Alternatively, use online services (like Google Colab):
1. Install the dependencies:
```bash
# Debian-based:
-sudo apt install wget git python3 python3-venv
+sudo apt install wget git python3 python3-venv libgl1 libglib2.0-0
# Red Hat-based:
sudo dnf install wget git python3
# Arch-based:
sudo pacman -S wget git python3
```
2. Navigate to the directory you would like the webui to be installed and execute the following command:
```bash
-bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
+wget -q https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh
```
3. Run `webui.sh`.
4. Check `webui-user.sh` for options.
10 changes: 9 additions & 1 deletion extensions-builtin/Lora/extra_networks_lora.py
@@ -6,9 +6,14 @@ class ExtraNetworkLora(extra_networks.ExtraNetwork):
def __init__(self):
super().__init__('lora')

self.errors = {}
"""mapping of network names to the number of errors the network had during operation"""

def activate(self, p, params_list):
additional = shared.opts.sd_lora

self.errors.clear()

if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
@@ -56,4 +61,7 @@ def activate(self, p, params_list):
p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)

def deactivate(self, p):
-        pass
+        if self.errors:
+            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
+
+        self.errors.clear()
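Taken together with the networks.py changes further down, the new `errors` dict implements a simple accumulate-and-report cycle: cleared on `activate`, incremented once per failing layer, and summarized a single time on `deactivate`. A minimal standalone sketch of that cycle, assuming nothing from the webui codebase (`ReportingNetwork` and its methods are illustrative, not real API):

```python
# Sketch of the accumulate-and-report pattern; all names are hypothetical.
class ReportingNetwork:
    def __init__(self):
        self.errors = {}  # network name -> error count

    def activate(self):
        self.errors.clear()  # start each generation with a clean slate

    def record_error(self, name):
        self.errors[name] = self.errors.get(name, 0) + 1

    def deactivate(self, report):
        if self.errors:
            report("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
        self.errors.clear()

net = ReportingNetwork()
net.activate()
net.record_error("lora_a")
net.record_error("lora_a")
net.deactivate(print)  # Networks with errors: lora_a (2)
```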
7 changes: 5 additions & 2 deletions extensions-builtin/Lora/network.py
@@ -133,7 +133,7 @@ def calc_scale(self):

return 1.0

-    def finalize_updown(self, updown, orig_weight, output_shape):
+    def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
if self.bias is not None:
updown = updown.reshape(self.bias.shape)
updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
@@ -145,7 +145,10 @@ def finalize_updown(self, updown, orig_weight, output_shape):
if orig_weight.size().numel() == updown.size().numel():
updown = updown.reshape(orig_weight.shape)

-        return updown * self.calc_scale() * self.multiplier()
+        if ex_bias is not None:
+            ex_bias = ex_bias * self.multiplier()
+
+        return updown * self.calc_scale() * self.multiplier(), ex_bias

def calc_updown(self, target):
raise NotImplementedError()
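Because `finalize_updown` (and with it `calc_updown`) now returns an `(updown, ex_bias)` pair, every caller has to unpack two values even when no bias delta exists. A standalone sketch of the new contract, using plain tensors instead of the webui module classes:

```python
import torch

def finalize_updown(updown, multiplier, scale, ex_bias=None):
    # Sketch of the new contract: always return an (updown, ex_bias) pair;
    # ex_bias is scaled by the multiplier only, updown by scale * multiplier.
    if ex_bias is not None:
        ex_bias = ex_bias * multiplier
    return updown * scale * multiplier, ex_bias

weight = torch.zeros(4, 4)
updown, ex_bias = finalize_updown(torch.ones(4, 4), multiplier=0.8, scale=1.0)
weight += updown
assert ex_bias is None  # no bias delta supplied, so callers skip the bias update
```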
28 changes: 28 additions & 0 deletions extensions-builtin/Lora/network_norm.py
@@ -0,0 +1,28 @@
import network


class ModuleTypeNorm(network.ModuleType):
def create_module(self, net: network.Network, weights: network.NetworkWeights):
if all(x in weights.w for x in ["w_norm", "b_norm"]):
return NetworkModuleNorm(net, weights)

return None


class NetworkModuleNorm(network.NetworkModule):
def __init__(self, net: network.Network, weights: network.NetworkWeights):
super().__init__(net, weights)

self.w_norm = weights.w.get("w_norm")
self.b_norm = weights.w.get("b_norm")

def calc_updown(self, orig_weight):
output_shape = self.w_norm.shape
updown = self.w_norm.to(orig_weight.device, dtype=orig_weight.dtype)

if self.b_norm is not None:
ex_bias = self.b_norm.to(orig_weight.device, dtype=orig_weight.dtype)
else:
ex_bias = None

return self.finalize_updown(updown, orig_weight, output_shape, ex_bias)
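`NetworkModuleNorm` passes the stored `w_norm`/`b_norm` tensors straight through `finalize_updown`, so a norm patch amounts to elementwise offsets on a normalization layer's affine parameters. A hedged illustration of the end effect on a `LayerNorm` (the delta values are invented):

```python
import torch

norm = torch.nn.LayerNorm(4)       # weight starts at 1, bias at 0
w_delta = torch.full((4,), 0.10)   # stands in for a loaded "w_norm" tensor
b_delta = torch.full((4,), -0.05)  # stands in for a loaded "b_norm" tensor

with torch.no_grad():              # mirror networks.py: in-place, no autograd
    norm.weight += w_delta         # the updown half of the pair
    norm.bias += b_delta           # the ex_bias half of the pair

print(norm.weight)  # tensor([1.1000, 1.1000, 1.1000, 1.1000], ...)
print(norm.bias)    # tensor([-0.0500, -0.0500, -0.0500, -0.0500], ...)
```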
162 changes: 129 additions & 33 deletions extensions-builtin/Lora/networks.py
@@ -1,3 +1,4 @@
import logging
import os
import re

@@ -7,6 +8,7 @@
import network_ia3
import network_lokr
import network_full
import network_norm

import torch
from typing import Union
@@ -19,6 +21,7 @@
network_ia3.ModuleTypeIa3(),
network_lokr.ModuleTypeLokr(),
network_full.ModuleTypeFull(),
network_norm.ModuleTypeNorm(),
]


@@ -31,6 +34,8 @@
"resnets": {
"conv1": "in_layers_2",
"conv2": "out_layers_3",
"norm1": "in_layers_0",
"norm2": "out_layers_0",
"time_emb_proj": "emb_layers_1",
"conv_shortcut": "skip_connection",
}
@@ -190,11 +195,19 @@ def load_network(name, network_on_disk):
net.modules[key] = net_module

if keys_failed_to_match:
print(f"Failed to match keys when loading network {network_on_disk.filename}: {keys_failed_to_match}")
logging.debug(f"Network {network_on_disk.filename} didn't match keys: {keys_failed_to_match}")

return net


def purge_networks_from_memory():
while len(networks_in_memory) > shared.opts.lora_in_memory_limit and len(networks_in_memory) > 0:
name = next(iter(networks_in_memory))
networks_in_memory.pop(name, None)

devices.torch_gc()


def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
already_loaded = {}

@@ -212,15 +225,19 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):

failed_to_load_networks = []

-    for i, name in enumerate(names):
+    for i, (network_on_disk, name) in enumerate(zip(networks_on_disk, names)):
net = already_loaded.get(name, None)

-        network_on_disk = networks_on_disk[i]

if network_on_disk is not None:
if net is None:
net = networks_in_memory.get(name)

if net is None or os.path.getmtime(network_on_disk.filename) > net.mtime:
try:
net = load_network(name, network_on_disk)

networks_in_memory.pop(name, None)
networks_in_memory[name] = net
except Exception as e:
errors.display(e, f"loading network {network_on_disk.filename}")
continue
@@ -231,7 +248,7 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):

if net is None:
failed_to_load_networks.append(name)
print(f"Couldn't find network with name {name}")
logging.info(f"Couldn't find network with name {name}")
continue

net.te_multiplier = te_multipliers[i] if te_multipliers else 1.0
@@ -240,23 +257,38 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
loaded_networks.append(net)

if failed_to_load_networks:
sd_hijack.model_hijack.comments.append("Failed to find networks: " + ", ".join(failed_to_load_networks))
sd_hijack.model_hijack.comments.append("Networks not found: " + ", ".join(failed_to_load_networks))

purge_networks_from_memory()

-def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "network_weights_backup", None)
bias_backup = getattr(self, "network_bias_backup", None)

-    if weights_backup is None:
+    if weights_backup is None and bias_backup is None:
return

-    if isinstance(self, torch.nn.MultiheadAttention):
-        self.in_proj_weight.copy_(weights_backup[0])
-        self.out_proj.weight.copy_(weights_backup[1])
-    else:
-        self.weight.copy_(weights_backup)
+    if weights_backup is not None:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.in_proj_weight.copy_(weights_backup[0])
+            self.out_proj.weight.copy_(weights_backup[1])
+        else:
+            self.weight.copy_(weights_backup)
+
+    if bias_backup is not None:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.out_proj.bias.copy_(bias_backup)
+        else:
+            self.bias.copy_(bias_backup)
+    else:
+        if isinstance(self, torch.nn.MultiheadAttention):
+            self.out_proj.bias = None
+        else:
+            self.bias = None


-def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
+def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.GroupNorm, torch.nn.LayerNorm, torch.nn.MultiheadAttention]):
"""
Applies the currently selected set of networks to the weights of torch layer self.
If weights already have this particular set of networks applied, does nothing.
Expand All @@ -279,43 +311,75 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn

self.network_weights_backup = weights_backup

bias_backup = getattr(self, "network_bias_backup", None)
if bias_backup is None:
if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
elif getattr(self, 'bias', None) is not None:
bias_backup = self.bias.to(devices.cpu, copy=True)
else:
bias_backup = None
self.network_bias_backup = bias_backup

if current_names != wanted_names:
network_restore_weights_from_backup(self)

for net in loaded_networks:
module = net.modules.get(network_layer_name, None)
if module is not None and hasattr(self, 'weight'):
-                with torch.no_grad():
-                    updown = module.calc_updown(self.weight)
-
-                    if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
-                        # inpainting model. zero pad updown to make channel[1] 4 to 9
-                        updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
-
-                    self.weight += updown
-
-                continue
+                try:
+                    with torch.no_grad():
+                        updown, ex_bias = module.calc_updown(self.weight)
+
+                        if len(self.weight.shape) == 4 and self.weight.shape[1] == 9:
+                            # inpainting model. zero pad updown to make channel[1] 4 to 9
+                            updown = torch.nn.functional.pad(updown, (0, 0, 0, 0, 0, 5))
+
+                        self.weight += updown
+                        if ex_bias is not None and hasattr(self, 'bias'):
+                            if self.bias is None:
+                                self.bias = torch.nn.Parameter(ex_bias)
+                            else:
+                                self.bias += ex_bias
+                except RuntimeError as e:
+                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+                continue

module_q = net.modules.get(network_layer_name + "_q_proj", None)
module_k = net.modules.get(network_layer_name + "_k_proj", None)
module_v = net.modules.get(network_layer_name + "_v_proj", None)
module_out = net.modules.get(network_layer_name + "_out_proj", None)

if isinstance(self, torch.nn.MultiheadAttention) and module_q and module_k and module_v and module_out:
-                with torch.no_grad():
-                    updown_q = module_q.calc_updown(self.in_proj_weight)
-                    updown_k = module_k.calc_updown(self.in_proj_weight)
-                    updown_v = module_v.calc_updown(self.in_proj_weight)
-                    updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
-                    updown_out = module_out.calc_updown(self.out_proj.weight)
-
-                    self.in_proj_weight += updown_qkv
-                    self.out_proj.weight += updown_out
-
-                continue
+                try:
+                    with torch.no_grad():
+                        updown_q, _ = module_q.calc_updown(self.in_proj_weight)
+                        updown_k, _ = module_k.calc_updown(self.in_proj_weight)
+                        updown_v, _ = module_v.calc_updown(self.in_proj_weight)
+                        updown_qkv = torch.vstack([updown_q, updown_k, updown_v])
+                        updown_out, ex_bias = module_out.calc_updown(self.out_proj.weight)
+
+                        self.in_proj_weight += updown_qkv
+                        self.out_proj.weight += updown_out
+                        if ex_bias is not None:
+                            if self.out_proj.bias is None:
+                                self.out_proj.bias = torch.nn.Parameter(ex_bias)
+                            else:
+                                self.out_proj.bias += ex_bias
+
+                except RuntimeError as e:
+                    logging.debug(f"Network {net.name} layer {network_layer_name}: {e}")
+                    extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1
+
+                continue

if module is None:
continue

-            print(f'failed to calculate network weights for layer {network_layer_name}')
+            logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
+            extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

self.network_current_names = wanted_names

@@ -342,7 +406,7 @@ def network_forward(module, input, original_forward):
if module is None:
continue

-        y = module.forward(y, input)
+        y = module.forward(input, y)

return y

@@ -382,6 +446,36 @@ def network_Conv2d_load_state_dict(self, *args, **kwargs):
return torch.nn.Conv2d_load_state_dict_before_network(self, *args, **kwargs)


def network_GroupNorm_forward(self, input):
if shared.opts.lora_functional:
return network_forward(self, input, torch.nn.GroupNorm_forward_before_network)

network_apply_weights(self)

return torch.nn.GroupNorm_forward_before_network(self, input)


def network_GroupNorm_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)

return torch.nn.GroupNorm_load_state_dict_before_network(self, *args, **kwargs)


def network_LayerNorm_forward(self, input):
if shared.opts.lora_functional:
return network_forward(self, input, torch.nn.LayerNorm_forward_before_network)

network_apply_weights(self)

return torch.nn.LayerNorm_forward_before_network(self, input)


def network_LayerNorm_load_state_dict(self, *args, **kwargs):
network_reset_cached_weight(self)

return torch.nn.LayerNorm_load_state_dict_before_network(self, *args, **kwargs)


def network_MultiheadAttention_forward(self, *args, **kwargs):
network_apply_weights(self)

@@ -458,10 +552,12 @@ def infotext_pasted(infotext, params):
if added:
params["Prompt"] += "\n" + "".join(added)

extra_network_lora = None

available_networks = {}
available_network_aliases = {}
loaded_networks = []
networks_in_memory = {}
available_network_hash_lookup = {}
forbidden_network_aliases = {}

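One detail worth noting in the new `networks_in_memory` cache: `load_networks` pops and reinserts each freshly loaded entry, and since Python dicts preserve insertion order, that moves it to the back of the dict; `purge_networks_from_memory` always evicts `next(iter(...))`, i.e. the front, so the oldest-loaded network is dropped first. A standalone sketch of that eviction pattern (names and the limit are illustrative; the real code also calls `devices.torch_gc()` after purging):

```python
# LRU-by-insertion-order with a plain dict (Python 3.7+ guarantees order).
cache = {}
LIMIT = 2  # stands in for shared.opts.lora_in_memory_limit

def remember(name, net):
    cache.pop(name, None)  # re-inserting moves the entry to the back...
    cache[name] = net      # ...so the front holds the oldest entry

def purge():
    while len(cache) > LIMIT:
        cache.pop(next(iter(cache)))  # evict from the front

remember("a", 1); remember("b", 2); remember("a", 1); remember("c", 3)
purge()
print(list(cache))  # ['a', 'c'] -- 'b' was loaded least recently
```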