Merge branch 'release_candidate'
AUTOMATIC1111 committed May 13, 2023
2 parents 5ab7f21 + 231562e commit b08500c
Showing 71 changed files with 864 additions and 421 deletions.
45 changes: 45 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,48 @@
## 1.2.0

### Features:
* do not wait for stable diffusion model to load at startup
* add filename patterns: [denoising]
* directory hiding for extra networks: dirs starting with . will hide their cards on extra network tabs unless specifically searched for
* Lora: for the `<...>` text in the prompt, use the name of the Lora stored in the file's metadata, if present, instead of the filename (either can be used to activate the Lora; see the example after this list)
* Lora: read infotext params written by kohya-ss's additional-networks extension if they are present and that extension is not active
* Lora: Fix some Loras not working (ones that have 3x3 convolution layer)
* Lora: add an option to use old method of applying loras (producing same results as with kohya-ss)
* add version to infotext, footer and console output when starting
* add links to wiki for filename pattern settings
* add extended info for quicksettings setting and use multiselect input instead of a text field
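
For the Lora naming change, a `<lora:...>` token in the prompt can now refer either to the file name or to the `ss_output_name` alias stored in the file's metadata (see `LoraOnDisk.alias` in the diff below). A minimal sketch of the alias-aware lookup, using hypothetical file and alias names:

```python
# Sketch of alias-aware Lora lookup; file and alias names are made-up examples.
available_lora_aliases = {}

def register_lora(filename: str, metadata: dict) -> dict:
    name = filename.rsplit(".", 1)[0]               # e.g. "myLora_v2"
    alias = metadata.get("ss_output_name", name)    # e.g. "myLora"
    entry = {"name": name, "alias": alias, "filename": filename}
    # Both the file name and the alias resolve to the same entry.
    available_lora_aliases[name] = entry
    available_lora_aliases[alias] = entry
    return entry

register_lora("myLora_v2.safetensors", {"ss_output_name": "myLora"})

# Either prompt token now activates the same Lora:
assert available_lora_aliases["myLora_v2"] is available_lora_aliases["myLora"]
```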

### Minor:
* gradio bumped to 3.29.0
* torch bumped to 2.0.1
* --subpath option for gradio for use with reverse proxy
* linux/OSX: use existing virtualenv if already active (the VIRTUAL_ENV environment variable)
* possible frontend optimization: do not apply localizations if there are none
* Add extra `None` option for VAE in XYZ plot
* print error to console when batch processing in img2img fails
* create HTML for extra network pages only on demand
* allow directories starting with . to still list their models for lora, checkpoints, etc
* put infotext options into their own category in settings tab
* do not show licenses page when user selects Show all pages in settings

### Extensions:
* Tooltip localization support
* Add api method to get LoRA models with prompt

### Bug Fixes:
* re-add /docs endpoint
* fix gamepad navigation
* make the lightbox fullscreen image function properly
* fix squished thumbnails in extras tab
* keep "search" filter for extra networks when user refreshes the tab (previously it showed everything after you refreshed)
* fix webui showing the same image if you configure the generation to always save results into same file
* fix bug with upscalers not working properly
* Fix MPS on PyTorch 2.0.1, Intel Macs
* make it so that the custom context menu from contextMenu.js only disappears after the user's click, ignoring non-user click events
* prevent Reload UI button/link from reloading the page when it's not yet ready
* fix prompts from file script failing to read contents from a drag/drop file


## 1.1.1
### Bug Fixes:
* fix an error that prevents running webui on torch<2.0 without --disable-safe-unpickle
1 change: 1 addition & 0 deletions extensions-builtin/Lora/extra_networks_lora.py
@@ -1,6 +1,7 @@
from modules import extra_networks, shared
import lora


class ExtraNetworkLora(extra_networks.ExtraNetwork):
def __init__(self):
super().__init__('lora')
116 changes: 100 additions & 16 deletions extensions-builtin/Lora/lora.py
@@ -4,7 +4,7 @@
import torch
from typing import Union

from modules import shared, devices, sd_models, errors
from modules import shared, devices, sd_models, errors, scripts

metadata_tags_order = {"ss_sd_model_name": 1, "ss_resolution": 2, "ss_clip_skip": 3, "ss_num_train_images": 10, "ss_tag_frequency": 20}

@@ -93,6 +93,7 @@ def __init__(self, name, filename):
self.metadata = m

self.ssmd_cover_images = self.metadata.pop('ssmd_cover_images', None) # those are cover images and they are too big to display in UI as text
self.alias = self.metadata.get('ss_output_name', self.name)


class LoraModule:
@@ -165,8 +166,10 @@ def load_lora(name, filename):
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
elif type(sd_module) == torch.nn.MultiheadAttention:
module = torch.nn.Linear(weight.shape[1], weight.shape[0], bias=False)
elif type(sd_module) == torch.nn.Conv2d:
elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (1, 1):
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (1, 1), bias=False)
elif type(sd_module) == torch.nn.Conv2d and weight.shape[2:] == (3, 3):
module = torch.nn.Conv2d(weight.shape[1], weight.shape[0], (3, 3), bias=False)
else:
print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}')
continue
@@ -199,11 +202,11 @@ def load_loras(names, multipliers=None):

loaded_loras.clear()

loras_on_disk = [available_loras.get(name, None) for name in names]
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]
if any([x is None for x in loras_on_disk]):
list_available_loras()

loras_on_disk = [available_loras.get(name, None) for name in names]
loras_on_disk = [available_lora_aliases.get(name, None) for name in names]

for i, name in enumerate(names):
lora = already_loaded.get(name, None)
@@ -232,6 +235,8 @@ def lora_calc_updown(lora, module, target):

if up.shape[2:] == (1, 1) and down.shape[2:] == (1, 1):
updown = (up.squeeze(2).squeeze(2) @ down.squeeze(2).squeeze(2)).unsqueeze(2).unsqueeze(3)
elif up.shape[2:] == (3, 3) or down.shape[2:] == (3, 3):
updown = torch.nn.functional.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)
else:
updown = up @ down

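As a sanity check of what the new 3x3 branch computes (merging a 3x3 `down` kernel with a 1x1 `up` kernel into a single 3x3 update), the following standalone sketch with arbitrary example shapes verifies that the merged kernel matches applying the two convolutions in sequence:

```python
import torch
import torch.nn.functional as F

# Arbitrary example shapes: rank 4, 8 input channels, 16 output channels.
rank, cin, cout = 4, 8, 16
down = torch.randn(rank, cin, 3, 3)   # 3x3 "down" kernel
up = torch.randn(cout, rank, 1, 1)    # 1x1 "up" kernel
x = torch.randn(1, cin, 32, 32)

# Merged 3x3 update, computed the same way as in lora_calc_updown.
updown = F.conv2d(down.permute(1, 0, 2, 3), up).permute(1, 0, 2, 3)  # (cout, cin, 3, 3)

# Applying the merged kernel is equivalent to applying down, then up.
sequential = F.conv2d(F.conv2d(x, down, padding=1), up)
merged = F.conv2d(x, updown, padding=1)
assert torch.allclose(sequential, merged, atol=1e-4)
```
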
@@ -240,6 +245,19 @@ def lora_calc_updown(lora, module, target):
return updown


def lora_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
weights_backup = getattr(self, "lora_weights_backup", None)

if weights_backup is None:
return

if isinstance(self, torch.nn.MultiheadAttention):
self.in_proj_weight.copy_(weights_backup[0])
self.out_proj.weight.copy_(weights_backup[1])
else:
self.weight.copy_(weights_backup)


def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.MultiheadAttention]):
"""
Applies the currently selected set of Loras to the weights of torch layer self.
@@ -264,12 +282,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
self.lora_weights_backup = weights_backup

if current_names != wanted_names:
if weights_backup is not None:
if isinstance(self, torch.nn.MultiheadAttention):
self.in_proj_weight.copy_(weights_backup[0])
self.out_proj.weight.copy_(weights_backup[1])
else:
self.weight.copy_(weights_backup)
lora_restore_weights_from_backup(self)

for lora in loaded_loras:
module = lora.modules.get(lora_layer_name, None)
@@ -300,12 +313,45 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu
setattr(self, "lora_current_names", wanted_names)


def lora_forward(module, input, original_forward):
"""
Old way of applying Lora by executing operations during layer's forward.
Stacking many loras this way results in big performance degradation.
"""

if len(loaded_loras) == 0:
return original_forward(module, input)

input = devices.cond_cast_unet(input)

lora_restore_weights_from_backup(module)
lora_reset_cached_weight(module)

res = original_forward(module, input)

lora_layer_name = getattr(module, 'lora_layer_name', None)
for lora in loaded_loras:
module = lora.modules.get(lora_layer_name, None)
if module is None:
continue

module.up.to(device=devices.device)
module.down.to(device=devices.device)

res = res + module.up(module.down(input)) * lora.multiplier * (module.alpha / module.up.weight.shape[1] if module.alpha else 1.0)

return res

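The performance note in `lora_forward`'s docstring comes down to where the Lora weights are applied: the old path runs `up(down(x))` for every active Lora on every forward call, while the default path folds the update into the layer's weight once and restores a backup when the selection changes. A rough standalone sketch of the two approaches for a single Linear layer (toy shapes, alpha scaling omitted):

```python
import torch

torch.manual_seed(0)
layer = torch.nn.Linear(8, 8, bias=False)
down = torch.nn.Linear(8, 4, bias=False)   # LoRA "down" projection
up = torch.nn.Linear(4, 8, bias=False)     # LoRA "up" projection
multiplier = 0.8
x = torch.randn(2, 8)

# Old / functional way: extra matmuls on every forward call, per active Lora.
functional = layer(x) + up(down(x)) * multiplier

# Default way: merge the update into the weight once, keeping a backup.
backup = layer.weight.detach().clone()
with torch.no_grad():
    layer.weight += up.weight @ down.weight * multiplier
merged = layer(x)

assert torch.allclose(functional, merged, atol=1e-5)

with torch.no_grad():
    layer.weight.copy_(backup)   # restore when the Lora selection changes
```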

def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]):
setattr(self, "lora_current_names", ())
setattr(self, "lora_weights_backup", None)


def lora_Linear_forward(self, input):
if shared.opts.lora_functional:
return lora_forward(self, input, torch.nn.Linear_forward_before_lora)

lora_apply_weights(self)

return torch.nn.Linear_forward_before_lora(self, input)
@@ -318,6 +364,9 @@ def lora_Linear_load_state_dict(self, *args, **kwargs):


def lora_Conv2d_forward(self, input):
if shared.opts.lora_functional:
return lora_forward(self, input, torch.nn.Conv2d_forward_before_lora)

lora_apply_weights(self)

return torch.nn.Conv2d_forward_before_lora(self, input)
@@ -343,24 +392,59 @@ def lora_MultiheadAttention_load_state_dict(self, *args, **kwargs):

def list_available_loras():
available_loras.clear()
available_lora_aliases.clear()

os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)

candidates = \
glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.pt'), recursive=True) + \
glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.safetensors'), recursive=True) + \
glob.glob(os.path.join(shared.cmd_opts.lora_dir, '**/*.ckpt'), recursive=True)

candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in sorted(candidates, key=str.lower):
if os.path.isdir(filename):
continue

name = os.path.splitext(os.path.basename(filename))[0]
entry = LoraOnDisk(name, filename)

available_loras[name] = entry

available_lora_aliases[name] = entry
available_lora_aliases[entry.alias] = entry


re_lora_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")


def infotext_pasted(infotext, params):
if "AddNet Module 1" in [x[1] for x in scripts.scripts_txt2img.infotext_fields]:
return # if the other extension is active, it will handle those fields, no need to do anything

added = []

for k, v in params.items():
if not k.startswith("AddNet Model "):
continue

num = k[13:]

if params.get("AddNet Module " + num) != "LoRA":
continue

name = params.get("AddNet Model " + num)
if name is None:
continue

m = re_lora_name.match(name)
if m:
name = m.group(1)

multiplier = params.get("AddNet Weight A " + num, "1.0")

available_loras[name] = LoraOnDisk(name, filename)
added.append(f"<lora:{name}:{multiplier}>")

if added:
params["Prompt"] += "\n" + "".join(added)

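As an illustration of `infotext_pasted` above: parameters written by kohya-ss's additional-networks extension (assuming that extension itself is not active) are translated into regular `<lora:...>` prompt tokens. Example values only:

```python
# Hypothetical pasted parameters; values are made-up examples.
params = {
    "Prompt": "a photo of a cat",
    "AddNet Module 1": "LoRA",
    "AddNet Model 1": "myLora(abc123de)",   # trailing hash is stripped by re_lora_name
    "AddNet Weight A 1": "0.6",
}

# After infotext_pasted(infotext, params), the prompt gains an equivalent token:
# params["Prompt"] == "a photo of a cat\n<lora:myLora:0.6>"
```
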
available_loras = {}
available_lora_aliases = {}
loaded_loras = []

list_available_loras()
27 changes: 26 additions & 1 deletion extensions-builtin/Lora/scripts/lora_script.py
@@ -1,12 +1,12 @@
import torch
import gradio as gr
from fastapi import FastAPI

import lora
import extra_networks_lora
import ui_extra_networks_lora
from modules import script_callbacks, ui_extra_networks, extra_networks, shared


def unload():
torch.nn.Linear.forward = torch.nn.Linear_forward_before_lora
torch.nn.Linear._load_from_state_dict = torch.nn.Linear_load_state_dict_before_lora
@@ -49,8 +49,33 @@ def before_ui():
script_callbacks.on_model_loaded(lora.assign_lora_names_to_compvis_modules)
script_callbacks.on_script_unloaded(unload)
script_callbacks.on_before_ui(before_ui)
script_callbacks.on_infotext_pasted(lora.infotext_pasted)


shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), {
"sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras),
}))


shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), {
"lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
}))


def create_lora_json(obj: lora.LoraOnDisk):
return {
"name": obj.name,
"alias": obj.alias,
"path": obj.filename,
"metadata": obj.metadata,
}


def api_loras(_: gr.Blocks, app: FastAPI):
@app.get("/sdapi/v1/loras")
async def get_loras():
return [create_lora_json(obj) for obj in lora.available_loras.values()]


script_callbacks.on_app_started(api_loras)

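The new `/sdapi/v1/loras` endpoint returns the fields assembled by `create_lora_json` above. A quick way to query it, assuming the webui is running at its default local address (adjust host and port to your setup):

```python
import requests

# Default local address is an assumption; change it to match your launch options.
resp = requests.get("http://127.0.0.1:7860/sdapi/v1/loras")
resp.raise_for_status()

for entry in resp.json():
    # Each entry carries: name, alias, path, metadata (see create_lora_json).
    print(entry["name"], "->", entry["alias"], entry["path"])
```
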
2 changes: 1 addition & 1 deletion extensions-builtin/Lora/ui_extra_networks_lora.py
@@ -21,7 +21,7 @@ def list_items(self):
"preview": self.find_preview(path),
"description": self.find_description(path),
"search_term": self.search_terms_from_path(lora_on_disk.filename),
"prompt": json.dumps(f"<lora:{name}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
"prompt": json.dumps(f"<lora:{lora_on_disk.alias}:") + " + opts.extra_networks_default_multiplier + " + json.dumps(">"),
"local_preview": f"{path}.{shared.opts.samples_format}",
"metadata": json.dumps(lora_on_disk.metadata, indent=4) if lora_on_disk.metadata else None,
}
2 changes: 1 addition & 1 deletion html/extra-networks-card.html
@@ -6,7 +6,7 @@
<ul>
<a href="#" title="replace preview image with currently selected in gallery" onclick={save_card_preview}>replace preview</a>
</ul>
<span style="display:none" class='search_term'>{search_term}</span>
<span style="display:none" class='search_term{serach_only}'>{search_term}</span>
</div>
<span class='name'>{name}</span>
<span class='description'>{description}</span>
33 changes: 14 additions & 19 deletions javascript/aspectRatioOverlay.js
@@ -45,29 +45,24 @@ function dimensionChange(e, is_width, is_height){

var viewportOffset = targetElement.getBoundingClientRect();

viewportscale = Math.min( targetElement.clientWidth/targetElement.naturalWidth, targetElement.clientHeight/targetElement.naturalHeight )
var viewportscale = Math.min( targetElement.clientWidth/targetElement.naturalWidth, targetElement.clientHeight/targetElement.naturalHeight )

scaledx = targetElement.naturalWidth*viewportscale
scaledy = targetElement.naturalHeight*viewportscale
var scaledx = targetElement.naturalWidth*viewportscale
var scaledy = targetElement.naturalHeight*viewportscale

cleintRectTop = (viewportOffset.top+window.scrollY)
cleintRectLeft = (viewportOffset.left+window.scrollX)
cleintRectCentreY = cleintRectTop + (targetElement.clientHeight/2)
cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth/2)
var cleintRectTop = (viewportOffset.top+window.scrollY)
var cleintRectLeft = (viewportOffset.left+window.scrollX)
var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight/2)
var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth/2)

viewRectTop = cleintRectCentreY-(scaledy/2)
viewRectLeft = cleintRectCentreX-(scaledx/2)
arRectWidth = scaledx
arRectHeight = scaledy
var arscale = Math.min( scaledx/currentWidth, scaledy/currentHeight )
var arscaledx = currentWidth*arscale
var arscaledy = currentHeight*arscale

arscale = Math.min( arRectWidth/currentWidth, arRectHeight/currentHeight )
arscaledx = currentWidth*arscale
arscaledy = currentHeight*arscale

arRectTop = cleintRectCentreY-(arscaledy/2)
arRectLeft = cleintRectCentreX-(arscaledx/2)
arRectWidth = arscaledx
arRectHeight = arscaledy
var arRectTop = cleintRectCentreY-(arscaledy/2)
var arRectLeft = cleintRectCentreX-(arscaledx/2)
var arRectWidth = arscaledx
var arRectHeight = arscaledy

arPreviewRect.style.top = arRectTop+'px';
arPreviewRect.style.left = arRectLeft+'px';