Description
I tried it and got the error below.
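For context, the loading cell is not part of the traceback; the setup was roughly along these lines (a minimal sketch — the SDXL base checkpoint and `device_map="balanced"` are my assumptions, not taken from the issue, but sharding the pipeline across GPUs is what typically produces the cuda:0 / cuda:2 mismatch seen below):

```python
import torch
from diffusers import StableDiffusionXLPipeline

# Assumed loading step (not shown in the traceback): sharding the pipeline
# components across the available GPUs instead of placing everything on one device.
pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    device_map="balanced",
)
```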
RuntimeError Traceback (most recent call last)
Cell In[2], line 9
3 # recommend not using batch operations for sd3, as cpu memory could be exceeded.
4 prompts = [
5 # "A photo of a puppy wearing a hat.",
6 "A capybara holding a sign that reads Hello World.",
7 ]
----> 9 images = pipe(
10 prompts,
11 num_inference_steps=15,
12 guidance_scale=4.5,
13 ).images
16 for batch, image in enumerate(images):
17 image.save(f'{batch}-sd3.png')
File /home/miniconda3/envs/llm/lib/python3.12/site-packages/torch/utils/_contextlib.py:115, in context_decorator.&lt;locals&gt;.decorate_context(*args, **kwargs)
112 @functools.wraps(func)
113 def decorate_context(*args, **kwargs):
114 with ctx_factory():
--> 115 return func(*args, **kwargs)
File /home/miniconda3/envs/llm/lib/python3.12/site-packages/diffusers/pipelines/stable_diffusion_xl/pipeline_stable_diffusion_xl.py:1237, in StableDiffusionXLPipeline.__call__(self, prompt, prompt_2, height, width, num_inference_steps, timesteps, sigmas, denoising_end, guidance_scale, negative_prompt, negative_prompt_2, num_images_per_prompt, eta, generator, latents, prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds, ip_adapter_image, ip_adapter_image_embeds, output_type, return_dict, cross_attention_kwargs, guidance_rescale, original_size, crops_coords_top_left, target_size, negative_original_size, negative_crops_coords_top_left, negative_target_size, clip_skip, callback_on_step_end, callback_on_step_end_tensor_inputs, **kwargs)
1235 # compute the previous noisy sample x_t -> x_t-1
1236 latents_dtype = latents.dtype
-> 1237 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1238 if latents.dtype != latents_dtype:
1239 if torch.backends.mps.is_available():
1240 # some platforms (eg. apple mps) misbehave due to a pytorch bug: pytorch/pytorch#99272
File /home/miniconda3/envs/llm/lib/python3.12/site-packages/diffusers/schedulers/scheduling_euler_discrete.py:656, in EulerDiscreteScheduler.step(self, model_output, timestep, sample, s_churn, s_tmin, s_tmax, s_noise, generator, return_dict)
654 pred_original_sample = model_output
655 elif self.config.prediction_type == "epsilon":
--> 656 pred_original_sample = sample - sigma_hat * model_output
657 elif self.config.prediction_type == "v_prediction":
658 # denoised = model_output * c_out + input * c_skip
659 pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1))
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:2 and cuda:0!
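A quick way to see where the cuda:2 / cuda:0 split comes from is to print which device each pipeline component ended up on (a diagnostic sketch, not part of the original report; `pipe` is the pipeline object from the cell above):

```python
# Show the device(s) holding each major component's parameters.
for name in ("unet", "vae", "text_encoder", "text_encoder_2"):
    module = getattr(pipe, name, None)
    if module is not None:
        print(name, {str(p.device) for p in module.parameters()})

# If the pipeline was loaded with a device_map, the resulting map is stored here.
print(getattr(pipe, "hf_device_map", None))
```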