Implement general forward method for all methods in built-in lora ext #14547

Merged
merged 2 commits on Jan 6, 2024
33 changes: 32 additions & 1 deletion extensions-builtin/Lora/network.py
@@ -3,6 +3,9 @@
 from collections import namedtuple
 import enum

+import torch.nn as nn
+import torch.nn.functional as F
+
 from modules import sd_models, cache, errors, hashes, shared

 NetworkWeights = namedtuple('NetworkWeights', ['network_key', 'sd_key', 'w', 'sd_module'])
@@ -115,6 +118,29 @@ def __init__(self, net: Network, weights: NetworkWeights):
         if hasattr(self.sd_module, 'weight'):
             self.shape = self.sd_module.weight.shape

+        self.ops = None
+        self.extra_kwargs = {}
+        if isinstance(self.sd_module, nn.Conv2d):
+            self.ops = F.conv2d
+            self.extra_kwargs = {
+                'stride': self.sd_module.stride,
+                'padding': self.sd_module.padding
+            }
+        elif isinstance(self.sd_module, nn.Linear):
+            self.ops = F.linear
+        elif isinstance(self.sd_module, nn.LayerNorm):
+            self.ops = F.layer_norm
+            self.extra_kwargs = {
+                'normalized_shape': self.sd_module.normalized_shape,
+                'eps': self.sd_module.eps
+            }
+        elif isinstance(self.sd_module, nn.GroupNorm):
+            self.ops = F.group_norm
+            self.extra_kwargs = {
+                'num_groups': self.sd_module.num_groups,
+                'eps': self.sd_module.eps
+            }
+
         self.dim = None
         self.bias = weights.w.get("bias")
         self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
@@ -155,5 +181,10 @@ def calc_updown(self, target):
         raise NotImplementedError()

     def forward(self, x, y):
-        raise NotImplementedError()
+        """A general forward implementation for all modules"""
+        if self.ops is None:
+            raise NotImplementedError()
+        else:
+            updown, ex_bias = self.calc_updown(self.sd_module.weight)
+            return y + self.ops(x, weight=updown, bias=ex_bias, **self.extra_kwargs)

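For illustration only (not part of this PR), a minimal sketch of what the new base-class forward() enables: a module type only needs to supply calc_updown() returning an (updown, ex_bias) pair, and the matching functional op plus extra_kwargs handle inference. The class name, import path, and zero delta below are hypothetical.

import torch

from network import NetworkModule  # base class changed in this diff; import path assumed

class NetworkModuleNoop(NetworkModule):  # hypothetical module type, for illustration
    def calc_updown(self, target):
        # Return (delta_weight, extra_bias); a zero delta contributes nothing on top
        # of the original output y, so forward() effectively passes y through.
        return torch.zeros_like(target), None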
12 changes: 6 additions & 6 deletions extensions-builtin/Lora/networks.py
@@ -458,23 +458,23 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         self.network_current_names = wanted_names


-def network_forward(module, input, original_forward):
+def network_forward(org_module, input, original_forward):
     """
     Old way of applying Lora by executing operations during layer's forward.
     Stacking many loras this way results in big performance degradation.
     """

     if len(loaded_networks) == 0:
-        return original_forward(module, input)
+        return original_forward(org_module, input)

     input = devices.cond_cast_unet(input)

-    network_restore_weights_from_backup(module)
-    network_reset_cached_weight(module)
+    network_restore_weights_from_backup(org_module)
+    network_reset_cached_weight(org_module)

-    y = original_forward(module, input)
+    y = original_forward(org_module, input)

-    network_layer_name = getattr(module, 'network_layer_name', None)
+    network_layer_name = getattr(org_module, 'network_layer_name', None)
     for lora in loaded_networks:
         module = lora.modules.get(network_layer_name, None)
         if module is None:
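A hypothetical wiring sketch (assumed names, not taken from this PR) of how network_forward is intended to be called: with a layer, its input, and the layer's original unbound forward, here nn.Linear.forward.

import torch.nn as nn

def lora_Linear_forward(self, input):  # hypothetical replacement forward
    # network_forward restores backed-up weights, runs the original forward to get y,
    # then lets each loaded network's module add its contribution via forward(x, y),
    # which for common layer types is now covered by the general implementation above.
    return network_forward(self, input, nn.Linear.forward)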