Merge branch 'AUTOMATIC1111-master'
lshqqytiger committed Jun 3, 2023
2 parents 91ce54a + 321fee1 · commit ebf229b
Showing 11 changed files with 36 additions and 16 deletions.
14 changes: 14 additions & 0 deletions CHANGELOG.md

@@ -1,3 +1,17 @@
+## 1.3.1
+
+### Features:
+* revert default cross attention optimization to Doggettx
+
+### Bug Fixes:
+* fix bug: LoRA doesn't apply when selected in the sd_lora dropdown
+* fix png info always being added even if the setting is not enabled
+* fix some fields not applying in xyz plot
+* fix "hires. fix" prompt sharing the same labels as txt2img_prompt
+* fix lora hashes not being added properly to infotext if there is only one lora
+* fix --use-cpu failing to work properly at startup
+* make the --disable-opt-split-attention command line option work again
+
 ## 1.3.0
 
 ### Features:
2 changes: 1 addition & 1 deletion modules/cmd_args.py

@@ -62,7 +62,7 @@
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
 parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
 parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="does not do anything")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
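
The restored help text matches the sd_hijack.py change later in this commit: the flag is a meaningful store_true boolean again instead of a documented no-op. A minimal standalone sketch (not the webui's actual startup path) of how the option parses:

```python
# Standalone argparse sketch; only this one option is registered here.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--disable-opt-split-attention", action='store_true',
                    help="prefer no cross-attention layer optimization for automatic choice of optimization")

args = parser.parse_args(["--disable-opt-split-attention"])
print(args.disable_opt_split_attention)  # True: automatic selection will now pick no optimizer
```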
2 changes: 1 addition & 1 deletion modules/extra_networks.py

@@ -26,7 +26,7 @@ def __init__(self, items=None):
         self.named = {}
 
         for item in self.items:
-            parts = item.split('=', 2)
+            parts = item.split('=', 2) if isinstance(item, str) else [item]
             if len(parts) == 2:
                 self.named[parts[0]] = parts[1]
             else:
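
The new isinstance guard suggests callers can now pass items that are not strings (objects already parsed elsewhere); only strings are split as key=value pairs, everything else passes through whole. A small illustrative sketch of the pattern (parse_items and its return shape are assumptions, not the webui's actual class):

```python
# Illustrative version of the guarded split: strings parse as "key=value",
# non-strings are kept as positional items instead of crashing on .split().
def parse_items(items):
    named, positional = {}, []
    for item in items:
        parts = item.split('=', 2) if isinstance(item, str) else [item]
        if len(parts) == 2:
            named[parts[0]] = parts[1]
        else:
            positional.append(item)
    return named, positional

print(parse_items(["weight=0.8", 42]))  # ({'weight': '0.8'}, [42]); the old code raised AttributeError on 42
```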
2 changes: 1 addition & 1 deletion modules/generation_parameters_copypaste.py

@@ -35,7 +35,7 @@ def reset():
 
 
 def quote(text):
-    if ',' not in str(text) and '\n' not in str(text):
+    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
         return text
 
     return json.dumps(text, ensure_ascii=False)
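
With ':' added to the check, any value containing a colon is now JSON-quoted. This plausibly matters for infotext entries whose values embed colons (lora hashes, for example), since a bare colon could be misread as a key separator when the text is parsed back. A quick standalone check of the new behavior:

```python
import json

def quote(text):
    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
        return text
    return json.dumps(text, ensure_ascii=False)

print(quote("DPM++ 2M"))           # DPM++ 2M -- returned as-is
print(quote("mylora: a1b2c3d4"))   # "mylora: a1b2c3d4" -- quoted, so the colon cannot break parsing
```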
9 changes: 6 additions & 3 deletions modules/images.py

@@ -493,9 +493,12 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_pnginfo=None):
         existing_pnginfo['parameters'] = geninfo
 
     if extension.lower() == '.png':
-        pnginfo_data = PngImagePlugin.PngInfo()
-        for k, v in (existing_pnginfo or {}).items():
-            pnginfo_data.add_text(k, str(v))
+        if opts.enable_pnginfo:
+            pnginfo_data = PngImagePlugin.PngInfo()
+            for k, v in (existing_pnginfo or {}).items():
+                pnginfo_data.add_text(k, str(v))
+        else:
+            pnginfo_data = None
 
         image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
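
The fix gates PngInfo construction on opts.enable_pnginfo and passes pnginfo=None otherwise, so Pillow writes no text chunks at all; previously the metadata block was built and embedded unconditionally. A standalone sketch with the setting stubbed as a plain boolean (save_png is illustrative, not the webui function):

```python
from PIL import Image, PngImagePlugin

def save_png(image, filename, parameters, enable_pnginfo):
    if enable_pnginfo:
        pnginfo_data = PngImagePlugin.PngInfo()
        pnginfo_data.add_text('parameters', parameters)
    else:
        pnginfo_data = None  # Pillow embeds no tEXt chunks in this case
    image.save(filename, format='PNG', pnginfo=pnginfo_data)

save_png(Image.new('RGB', (8, 8)), 'out.png', 'a prompt', enable_pnginfo=False)
print(Image.open('out.png').info)  # {} -- no embedded parameters
```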
5 changes: 2 additions & 3 deletions modules/processing.py

@@ -321,14 +321,13 @@ def get_conds_with_caching(self, function, required_prompts, steps, cache):
         have been used before. The second element is where the previously
         computed result is stored.
         """
-
-        if cache[0] is not None and (required_prompts, steps) == cache[0]:
+        if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]:
             return cache[1]
 
         with devices.autocast():
             cache[1] = function(shared.sd_model, required_prompts, steps)
 
-        cache[0] = (required_prompts, steps)
+        cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info)
         return cache[1]
 
     def setup_conds(self):
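
Widening the cache key to include opts.CLIP_stop_at_last_layers and the checkpoint info means a CLIP-skip change or a model swap now invalidates cached conditionings instead of silently reusing stale ones; this looks like the mechanism behind the changelog's "some fields not applying in xyz plot" fix. A reduced sketch of the pattern (names and the call counter are illustrative):

```python
# Cache layout mirrors the webui's: cache[0] is the key, cache[1] the result.
def get_with_caching(function, prompts, steps, clip_skip, checkpoint, cache):
    key = (prompts, steps, clip_skip, checkpoint)
    if cache[0] is not None and key == cache[0]:
        return cache[1]              # every field matched -- safe to reuse
    cache[1] = function(prompts, steps)
    cache[0] = key                   # remember the full context, not just the prompt
    return cache[1]

calls = {"n": 0}
def compute(prompts, steps):
    calls["n"] += 1
    return f"conds({prompts}, {steps})"

cache = [None, None]
get_with_caching(compute, ("cat",), 20, 1, "model-a", cache)
get_with_caching(compute, ("cat",), 20, 1, "model-a", cache)  # identical key: cache hit
get_with_caching(compute, ("cat",), 20, 2, "model-a", cache)  # CLIP skip changed: recompute
print(calls["n"])  # 2 -- the old (prompts, steps) key would have left this at 1
```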
2 changes: 2 additions & 0 deletions modules/sd_hijack.py

@@ -68,6 +68,8 @@ def apply_optimizations():
 
     if selection == "None":
         matching_optimizer = None
+    elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+        matching_optimizer = None
     elif matching_optimizer is None:
         matching_optimizer = optimizers[0]
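
The inserted branch is what makes the flag work again: under "Automatic" selection with --disable-opt-split-attention set, no optimizer is applied at all instead of falling through to the highest-priority one. A reduced, illustrative version of the selection logic:

```python
# Toy reduction of apply_optimizations()'s selection; optimizers are assumed
# pre-sorted by descending priority, as in the real module.
def pick_optimizer(selection, optimizers, disable_opt_split_attention, matching=None):
    if selection == "None":
        return None
    elif selection == "Automatic" and disable_opt_split_attention:
        return None
    elif matching is None:
        return optimizers[0]
    return matching

print(pick_optimizer("Automatic", ["Doggettx", "sdp-no-mem"], disable_opt_split_attention=True))   # None
print(pick_optimizer("Automatic", ["Doggettx", "sdp-no-mem"], disable_opt_split_attention=False))  # Doggettx
```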
6 changes: 3 additions & 3 deletions modules/sd_hijack_optimizations.py

@@ -59,7 +59,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80
 
     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)

@@ -73,7 +73,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward

@@ -116,7 +116,7 @@ def apply(self):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90
 
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
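
These three priority swaps implement the changelog's "revert default cross attention optimization to Doggettx": automatic selection takes the highest-priority available optimizer, and Doggettx at 90 now outranks sdp-no-mem (80) and sdp (70). A toy sketch of priority-ordered selection (plain dicts stand in for the real optimizer classes, whose is_available()/apply() methods are omitted):

```python
optimizers = [
    {"name": "sdp", "priority": 70},
    {"name": "sdp-no-mem", "priority": 80},
    {"name": "Doggettx", "priority": 90},
]
optimizers.sort(key=lambda x: x["priority"], reverse=True)
print(optimizers[0]["name"])  # Doggettx -- the restored default
```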
2 changes: 0 additions & 2 deletions modules/sd_models.py

@@ -319,8 +319,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer):
 
     timer.record("apply half()")
 
-    devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
-    devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
     devices.dtype_unet = model.model.diffusion_model.dtype
     devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16
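
These two assignments are not lost: the companion change in modules/shared.py below moves them to module scope, so devices.dtype and devices.dtype_vae exist before any checkpoint is loaded rather than only after the first load_model_weights() call. That reordering is the likely mechanism behind the changelog's "--use-cpu failing to work properly at startup" fix.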
4 changes: 4 additions & 0 deletions modules/shared.py

@@ -6,6 +6,7 @@
 import time
 
 import gradio as gr
+import torch
 import tqdm
 
 import modules.interrogate

@@ -77,6 +78,9 @@
 devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
     (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])
 
+devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
 device = devices.device
 weight_load_location = None if cmd_opts.lowram else "cpu"
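
Setting the dtypes at import time, right after device selection, makes them valid even on a CPU-only startup before any model load. A quick standalone check of the two added expressions, with the command-line flags stubbed as plain booleans:

```python
import torch

no_half, no_half_vae = False, True  # stand-ins for cmd_opts.no_half / cmd_opts.no_half_vae
dtype = torch.float32 if no_half else torch.float16
dtype_vae = torch.float32 if no_half or no_half_vae else torch.float16
print(dtype, dtype_vae)  # torch.float16 torch.float32 -- the VAE is kept in fp32 here
```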
4 changes: 2 additions & 2 deletions modules/ui.py

@@ -506,10 +506,10 @@ def create_ui():
                 with FormRow(elem_id="txt2img_hires_fix_row4", variant="compact", visible=opts.hires_fix_show_prompts) as hr_prompts_container:
                     with gr.Column(scale=80):
                         with gr.Row():
-                            hr_prompt = gr.Textbox(label="Prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
+                            hr_prompt = gr.Textbox(label="Hires prompt", elem_id="hires_prompt", show_label=False, lines=3, placeholder="Prompt for hires fix pass.\nLeave empty to use the same prompt as in first pass.", elem_classes=["prompt"])
                     with gr.Column(scale=80):
                         with gr.Row():
-                            hr_negative_prompt = gr.Textbox(label="Negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
+                            hr_negative_prompt = gr.Textbox(label="Hires negative prompt", elem_id="hires_neg_prompt", show_label=False, lines=3, placeholder="Negative prompt for hires fix pass.\nLeave empty to use the same negative prompt as in first pass.", elem_classes=["prompt"])
 
             elif category == "batch":
                 if not opts.dimensions_and_batch_together:
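
Renaming the labels gives the hires textboxes identities distinct from the main txt2img prompt fields; show_label=False keeps them visually unchanged, but the label still identifies each component internally, which lines up with the changelog's fix for the "hires. fix" prompt sharing labels with txt2img_prompt. A minimal gradio sketch of the idea (illustrative layout, not the webui's):

```python
import gradio as gr

with gr.Blocks() as demo:
    # Same appearance (labels hidden), but the components are now distinguishable.
    prompt = gr.Textbox(label="Prompt", show_label=False, lines=3)
    hr_prompt = gr.Textbox(label="Hires prompt", show_label=False, lines=3)
```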
