Error occurred when executing VAEEncodeArgMax, any ideas? #16

Open
snakeninny opened this issue May 24, 2024 · 0 comments

Full log

got prompt
model_type EPS
Using pytorch attention in VAE
Using pytorch attention in VAE
model_type EPS
Requested to load AutoencoderKL
Loading 1 new model
!!! Exception during processing!!! GET was unable to find an engine to execute this computation
Traceback (most recent call last):
  File "/home/zyl/ComfyUI/execution.py", line 151, in recursive_execute
    output_data, output_ui = get_output_data(obj, input_data_all)
  File "/home/zyl/ComfyUI/execution.py", line 81, in get_output_data
    return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
  File "/home/zyl/ComfyUI/execution.py", line 74, in map_node_over_list
    results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
  File "/home/zyl/ComfyUI/custom_nodes/ComfyUI-IC-Light-Native/ic_light_nodes.py", line 29, in encode
    ret = super().encode(vae, pixels)
  File "/home/zyl/ComfyUI/nodes.py", line 294, in encode
    t = vae.encode(pixels[:,:,:,:3])
  File "/home/zyl/ComfyUI/comfy/sd.py", line 328, in encode
    samples[x:x+batch_number] = self.first_stage_model.encode(pixels_in).to(self.output_device).float()
  File "/home/zyl/ComfyUI/comfy/ldm/models/autoencoder.py", line 181, in encode
    z = self.encoder(x)
  File "/home/zyl/miniconda3/envs/comfy/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/zyl/miniconda3/envs/comfy/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/zyl/ComfyUI/comfy/ldm/modules/diffusionmodules/model.py", line 520, in forward
    h = self.conv_in(x)
  File "/home/zyl/miniconda3/envs/comfy/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1532, in _wrapped_call_impl
    return self._call_impl(*args, **kwargs)
  File "/home/zyl/miniconda3/envs/comfy/lib/python3.10/site-packages/torch/nn/modules/module.py", line 1541, in _call_impl
    return forward_call(*args, **kwargs)
  File "/home/zyl/ComfyUI/comfy/ops.py", line 66, in forward
    return super().forward(*args, **kwargs)
  File "/home/zyl/miniconda3/envs/comfy/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 460, in forward
    return self._conv_forward(input, self.weight, self.bias)
  File "/home/zyl/miniconda3/envs/comfy/lib/python3.10/site-packages/torch/nn/modules/conv.py", line 456, in _conv_forward
    return F.conv2d(input, weight, bias, self.stride,
RuntimeError: GET was unable to find an engine to execute this computation

Prompt executed in 19.43 seconds
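
The traceback shows the failure happens inside F.conv2d in the VAE encoder's first convolution, so it looks like cuDNN cannot select an execution engine for that conv (in my understanding this message usually points at a PyTorch/CUDA/cuDNN mismatch or the GPU running out of memory; that is a guess, the log does not confirm it). A minimal sketch that tries the same kind of convolution outside ComfyUI, to check whether it reproduces; 128 output channels and fp16 are assumptions based on the standard SD 1.5 VAE, not read from the log, so fp32 is worth trying too:

import torch

# Rough stand-in for the VAE encoder's conv_in: 1x3x768x512 input matches the
# resized image in the workflow; 128 output channels is assumed from the SD 1.5 VAE.
x = torch.randn(1, 3, 768, 512, device="cuda", dtype=torch.float16)
conv = torch.nn.Conv2d(3, 128, kernel_size=3, padding=1).to(device="cuda", dtype=torch.float16)

with torch.no_grad():
    y = conv(x)  # if cuDNN is the problem, this should raise the same "unable to find an engine" error
print(y.shape)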

Workflow

{
  "4": {
    "inputs": {
      "ckpt_name": "realisticVisionV20_v20.safetensors"
    },
    "class_type": "CheckpointLoaderSimple",
    "_meta": {
      "title": "Load Checkpoint"
    }
  },
  "5": {
    "inputs": {
      "width": [
        "14",
        1
      ],
      "height": [
        "14",
        2
      ],
      "batch_size": 1
    },
    "class_type": "EmptyLatentImage",
    "_meta": {
      "title": "Empty Latent Image"
    }
  },
  "6": {
    "inputs": {
      "text": "beautiful woman, detailed face, sunshine from window",
      "clip": [
        "4",
        1
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "7": {
    "inputs": {
      "text": "lowres, bad anatomy, bad hands, cropped, worst quality",
      "clip": [
        "4",
        1
      ]
    },
    "class_type": "CLIPTextEncode",
    "_meta": {
      "title": "CLIP Text Encode (Prompt)"
    }
  },
  "11": {
    "inputs": {
      "image": "自拍照1_女 (17).jpeg",
      "upload": "image"
    },
    "class_type": "LoadImage",
    "_meta": {
      "title": "Load Image"
    }
  },
  "12": {
    "inputs": {
      "rem_mode": "RMBG-1.4",
      "image_output": "Save",
      "save_prefix": "ComfyUI",
      "images": [
        "14",
        0
      ]
    },
    "class_type": "easy imageRemBg",
    "_meta": {
      "title": "Image Remove Bg"
    }
  },
  "14": {
    "inputs": {
      "width": 512,
      "height": 768,
      "interpolation": "nearest",
      "keep_proportion": false,
      "condition": "always",
      "multiple_of": 0,
      "image": [
        "11",
        0
      ]
    },
    "class_type": "ImageResize+",
    "_meta": {
      "title": "🔧 Image Resize"
    }
  },
  "16": {
    "inputs": {
      "seed": 486289408338942,
      "steps": 25,
      "cfg": 2,
      "sampler_name": "dpmpp_2m_sde",
      "scheduler": "karras",
      "denoise": 0.9,
      "model": [
        "35",
        0
      ],
      "positive": [
        "6",
        0
      ],
      "negative": [
        "7",
        0
      ],
      "latent_image": [
        "5",
        0
      ]
    },
    "class_type": "KSampler",
    "_meta": {
      "title": "KSampler"
    }
  },
  "17": {
    "inputs": {
      "samples": [
        "16",
        0
      ],
      "vae": [
        "4",
        2
      ]
    },
    "class_type": "VAEDecode",
    "_meta": {
      "title": "VAE Decode"
    }
  },
  "18": {
    "inputs": {
      "filename_prefix": "ComfyUI",
      "images": [
        "17",
        0
      ]
    },
    "class_type": "SaveImage",
    "_meta": {
      "title": "Save Image"
    }
  },
  "23": {
    "inputs": {
      "unet_name": "iclight_sd15_fc_unet_ldm.safetensors"
    },
    "class_type": "UNETLoader",
    "_meta": {
      "title": "UNETLoader"
    }
  },
  "24": {
    "inputs": {
      "pixels": [
        "36",
        0
      ],
      "vae": [
        "4",
        2
      ]
    },
    "class_type": "VAEEncodeArgMax",
    "_meta": {
      "title": "VAE Encode ArgMax"
    }
  },
  "35": {
    "inputs": {
      "model": [
        "4",
        0
      ],
      "ic_model": [
        "23",
        0
      ],
      "c_concat": [
        "24",
        0
      ]
    },
    "class_type": "ICLightAppply",
    "_meta": {
      "title": "ICLightAppply"
    }
  },
  "36": {
    "inputs": {
      "image": [
        "12",
        0
      ],
      "alpha": [
        "12",
        1
      ]
    },
    "class_type": "ICLightApplyMaskGrey",
    "_meta": {
      "title": "IC Light Apply Mask Grey"
    }
  }
}
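
In case version information helps with diagnosing this, a small snippet to collect the PyTorch / CUDA / cuDNN versions and GPU name (the usual suspects for this error, as far as I understand):

import torch

print("torch:", torch.__version__)
print("CUDA (build):", torch.version.cuda)
print("cuDNN:", torch.backends.cudnn.version())
print("GPU:", torch.cuda.get_device_name(0) if torch.cuda.is_available() else "none")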

[Attached input image: 自拍照1_女 ("selfie 1, female"), the photo loaded by the LoadImage node]
