Traceback (most recent call last):
  File "D:\Other Applications\stable-diffusion-webui\modules\call_queue.py", line 56, in f
    res = list(func(*args, **kwargs))
  File "D:\Other Applications\stable-diffusion-webui\modules\call_queue.py", line 37, in f
    res = func(*args, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\modules\txt2img.py", line 56, in txt2img
    processed = process_images(p)
  File "D:\Other Applications\stable-diffusion-webui\modules\processing.py", line 503, in process_images
    res = process_images_inner(p)
  File "D:\Other Applications\stable-diffusion-webui\modules\processing.py", line 642, in process_images_inner
    uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc)
  File "D:\Other Applications\stable-diffusion-webui\modules\processing.py", line 587, in get_conds_with_caching
    cache[1] = function(shared.sd_model, required_prompts, steps)
  File "D:\Other Applications\stable-diffusion-webui\modules\prompt_parser.py", line 140, in get_learned_conditioning
    conds = model.get_learned_conditioning(texts)
  File "D:\Other Applications\stable-diffusion-webui\repositories\stable-diffusion-stability-ai\ldm\models\diffusion\ddpm.py", line 669, in get_learned_conditioning
    c = self.cond_stage_model(c)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\modules\sd_hijack_clip.py", line 229, in forward
    z = self.process_tokens(tokens, multipliers)
  File "D:\Other Applications\stable-diffusion-webui\modules\sd_hijack_clip.py", line 254, in process_tokens
    z = self.encode_with_transformers(tokens)
  File "D:\Other Applications\stable-diffusion-webui\modules\sd_hijack_clip.py", line 302, in encode_with_transformers
    outputs = self.wrapped.transformer(input_ids=tokens, output_hidden_states=-opts.CLIP_stop_at_last_layers)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 811, in forward
    return self.text_model(
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 721, in forward
    encoder_outputs = self.encoder(
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 650, in forward
    layer_outputs = encoder_layer(
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 379, in forward
    hidden_states, attn_weights = self.self_attn(
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\transformers\models\clip\modeling_clip.py", line 268, in forward
    query_states = self.q_proj(hidden_states) * self.scale
  File "D:\Other Applications\stable-diffusion-webui\venv\lib\site-packages\torch\nn\modules\module.py", line 1194, in _call_impl
    return forward_call(*input, **kwargs)
  File "D:\Other Applications\stable-diffusion-webui\extensions\a1111-sd-webui-locon\scripts......\extensions-builtin/Lora\lora.py", line 305, in lora_Linear_forward
    lora_apply_weights(self)
  File "D:\Other Applications\stable-diffusion-webui\extensions\a1111-sd-webui-locon\scripts......\extensions-builtin/Lora\lora.py", line 273, in lora_apply_weights
    self.weight += lora_calc_updown(lora, module, self.weight)
  File "D:\Other Applications\stable-diffusion-webui\extensions\a1111-sd-webui-locon\scripts\main.py", line 576, in lora_calc_updown
    updown = rebuild_weight(module, target)
  File "D:\Other Applications\stable-diffusion-webui\extensions\a1111-sd-webui-locon\scripts\main.py", line 565, in rebuild_weight
    if len(output_shape) == 4:
UnboundLocalError: local variable 'output_shape' referenced before assignment
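For context, this is a plain Python pitfall: in rebuild_weight, output_shape is only assigned inside the branches that recognise a particular module type, so a module that matches none of them reaches the len(output_shape) == 4 check with the name still unbound. A minimal, self-contained sketch of the pattern (hypothetical module kinds, not the extension's actual branches):

def rebuild_weight_sketch(module):
    # output_shape is bound only when the module kind is recognised
    if module.get("kind") == "lora":
        output_shape = (320, 320)
    elif module.get("kind") == "loha":
        output_shape = (320, 320, 3, 3)

    # Any other kind reaches this line with the name never assigned,
    # which raises exactly the UnboundLocalError shown in the traceback.
    if len(output_shape) == 4:
        return "conv-style weight"
    return "linear-style weight"

rebuild_weight_sketch({"kind": "something-else"})  # -> UnboundLocalError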
@acorderob I have checked, and this can only happen when another LoRA extension modifies the loaded LoRA or patches the LoRA-loading function.
I have added a check for this, but the only real solution may be to disable the other extension's LoRA handling.
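A defensive check for that case could look roughly like the sketch below (an illustrative guard using the same hypothetical module kinds as above, not the extension's actual commit):

def rebuild_weight_guarded(module):
    output_shape = None  # pre-bind so the name always exists
    if module.get("kind") == "lora":
        output_shape = (320, 320)
    elif module.get("kind") == "loha":
        output_shape = (320, 320, 3, 3)

    if output_shape is None:
        # Unrecognised module type (e.g. one patched by another extension):
        # skip it instead of crashing the whole generation.
        return None

    if len(output_shape) == 4:
        return "conv-style weight"
    return "linear-style weight"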
The only other extension that seems to touch LoRAs is "stable-diffusion-webui-composable-lora", but I have it disabled. I'll try removing it, just in case.
I sometimes get this error when trying to generate an image (even with no LyCORIS LoRAs).
Commit: 8e0ebd7