diff --git a/docs/source/en/training/distributed_inference.md b/docs/source/en/training/distributed_inference.md
index 64b1ea9f046d..a536703f5bce 100644
--- a/docs/source/en/training/distributed_inference.md
+++ b/docs/source/en/training/distributed_inference.md
@@ -223,7 +223,7 @@ from diffusers.image_processor import VaeImageProcessor
 import torch
 
 vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=torch.bfloat16).to("cuda")
-vae_scale_factor = 2 ** (len(vae.config.block_out_channels))
+vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1)
 image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor)
 
 with torch.no_grad():
diff --git a/docs/source/zh/training/distributed_inference.md b/docs/source/zh/training/distributed_inference.md
index ec35b5e730c6..e0537735b2ba 100644
--- a/docs/source/zh/training/distributed_inference.md
+++ b/docs/source/zh/training/distributed_inference.md
@@ -223,7 +223,7 @@ from diffusers.image_processor import VaeImageProcessor
 import torch
 
 vae = AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=torch.bfloat16).to("cuda")
-vae_scale_factor = 2 ** (len(vae.config.block_out_channels))
+vae_scale_factor = 2 ** (len(vae.config.block_out_channels) - 1)
 image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor)
 
 with torch.no_grad():
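
The fix matches the convention diffusers pipelines themselves use: a VAE with `N` entries in `block_out_channels` downsamples spatially by `2 ** (N - 1)`, not `2 ** N`, because the final down block does not halve the resolution. Below is a minimal standalone sketch of the arithmetic; the `block_out_channels` values and image size are hypothetical placeholders typical of a Stable Diffusion VAE, not taken from the docs above.

```python
# Hypothetical config: a typical SD-style VAE has 4 down blocks,
# but only the first 3 halve the spatial resolution.
block_out_channels = [128, 256, 512, 512]

wrong = 2 ** len(block_out_channels)        # 16 -- overestimates by 2x
right = 2 ** (len(block_out_channels) - 1)  # 8  -- the VAE's actual downsampling factor

# With the wrong factor, VaeImageProcessor would size latents/images
# as if the VAE compressed 16x instead of 8x.
height, width = 1024, 1024
print(height // wrong, width // wrong)  # 64 64   (incorrect latent size)
print(height // right, width // right)  # 128 128 (correct latent size)
```

With the old expression, `VaeImageProcessor` would pad and resize against a scale factor twice the true one, so decoded outputs could come back at the wrong resolution; the `- 1` brings the docs in line with the pipeline code.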