Update --hires_fix (#2414)
* Update --hires_fix

Change `--hires_fix` to calculate the initial width and height from the model's resolution (when available), while enforcing a minimum size.
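
In effect, the new first-pass sizing behaves like the sketch below (a standalone approximation, not the committed code: the default resolution of 512 and the round-down-to-multiple-of-64 step stand in for `self.model.unet.config.sample_size * self.model.vae_scale_factor` and `trim_to_multiple_of`):

    import math

    def initial_size(width: int, height: int, dimension: int = 512) -> tuple:
        """Pick a first-pass size with roughly the model's trained area, the
        requested aspect ratio, and neither side below half the resolution."""
        aspect = width / height
        min_dimension = math.floor(dimension * 0.5)
        model_area = dimension * dimension
        if aspect > 1.0:
            init_height = max(min_dimension, math.sqrt(model_area / aspect))
            init_width = init_height * aspect
        else:
            init_width = max(min_dimension, math.sqrt(model_area * aspect))
            init_height = init_width / aspect
        # snap each side down to the latent grid (multiples of 64 assumed here)
        return (math.floor(init_width) // 64) * 64, (math.floor(init_height) // 64) * 64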
JPPhoto committed Jan 29, 2023
1 parent c18db4e commit 07e03b3
Showing 1 changed file with 22 additions and 9 deletions.
31 changes: 22 additions & 9 deletions ldm/invoke/generator/txt2img2img.py
@@ -38,10 +38,6 @@ def get_make_image(self, prompt:str, sampler, steps:int, cfg_scale:float, ddim_e
                 uc, c, cfg_scale, extra_conditioning_info,
                 threshold = ThresholdSettings(threshold, warmup=0.2) if threshold else None)
             .add_scheduler_args_if_applicable(pipeline.scheduler, eta=ddim_eta))
-        scale_dim = min(width, height)
-        scale = 512 / scale_dim
-
-        init_width, init_height = trim_to_multiple_of(scale * width, scale * height)
-
         def make_image(x_T):

@@ -54,6 +50,10 @@ def make_image(x_T):
                 # TODO: threshold = threshold,
             )

+            # Get our initial generation width and height directly from the latent output so
+            # the message below is accurate.
+            init_width = first_pass_latent_output.size()[3] * self.downsampling_factor
+            init_height = first_pass_latent_output.size()[2] * self.downsampling_factor
             print(
                 f"\n>> Interpolating from {init_width}x{init_height} to {width}x{height} using DDIM sampling"
             )
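
As a sanity check of the shape arithmetic above: Stable Diffusion latents are NCHW tensors at 1/8 of pixel resolution, so multiplying dimension 3 (width) and dimension 2 (height) by the downsampling factor recovers the pixel size. A minimal sketch (the factor of 8 and the example latent shape are assumptions, not taken from this commit):

    import torch

    downsampling_factor = 8                        # typical Stable Diffusion VAE factor
    latents = torch.zeros(1, 4, 64, 96)            # NCHW latent for a 768(w) x 512(h) image
    init_width = latents.size()[3] * downsampling_factor    # 96 * 8 = 768
    init_height = latents.size()[2] * downsampling_factor   # 64 * 8 = 512
    assert (init_width, init_height) == (768, 512)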
@@ -106,11 +106,24 @@ def get_noise_like(self, like: torch.Tensor):
     def get_noise(self,width,height,scale = True):
         # print(f"Get noise: {width}x{height}")
         if scale:
-            trained_square = 512 * 512
-            actual_square = width * height
-            scale = math.sqrt(trained_square / actual_square)
-            scaled_width = math.ceil(scale * width / 64) * 64
-            scaled_height = math.ceil(scale * height / 64) * 64
+            # Scale the input width and height for the initial generation
+            # Make their area equivalent to the model's resolution area (e.g. 512*512 = 262144),
+            # while keeping the minimum dimension at least 0.5 * resolution (e.g. 512*0.5 = 256)
+
+            aspect = width / height
+            dimension = self.model.unet.config.sample_size * self.model.vae_scale_factor
+            min_dimension = math.floor(dimension * 0.5)
+            model_area = dimension * dimension  # hardcoded for now since all models are trained on square images
+
+            if aspect > 1.0:
+                init_height = max(min_dimension, math.sqrt(model_area / aspect))
+                init_width = init_height * aspect
+            else:
+                init_width = max(min_dimension, math.sqrt(model_area * aspect))
+                init_height = init_width / aspect
+
+            scaled_width, scaled_height = trim_to_multiple_of(math.floor(init_width), math.floor(init_height))
+
         else:
             scaled_width = width
             scaled_height = height
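
Worked example, using the initial_size sketch from above (same assumptions): a 768x512 request on a 512 model gives aspect = 1.5, model_area = 262144, and init_height = max(256, sqrt(262144 / 1.5)) ~= 418, hence init_width ~= 627; trimming both down to multiples of 64 yields 576x384, which keeps the 3:2 aspect, stays near the trained area, and respects the 256-pixel floor.

    print(initial_size(768, 512))    # (576, 384)
    print(initial_size(2560, 512))   # (1280, 256) -- the 0.5 * resolution floor overrides the area match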
