From b020af15c98df9ed0aa25b595efbef5dbcb388e0 Mon Sep 17 00:00:00 2001
From: Pu Cao <48318302+caopulan@users.noreply.github.com>
Date: Sat, 7 Oct 2023 15:44:32 +0800
Subject: [PATCH 1/3] Update train_custom_diffusion.py

---
 .../train_custom_diffusion.py                 | 88 +++++++++----------
 1 file changed, 44 insertions(+), 44 deletions(-)

diff --git a/examples/custom_diffusion/train_custom_diffusion.py b/examples/custom_diffusion/train_custom_diffusion.py
index 3288fe3258ac..387e21defd32 100644
--- a/examples/custom_diffusion/train_custom_diffusion.py
+++ b/examples/custom_diffusion/train_custom_diffusion.py
@@ -1214,50 +1214,50 @@ def main(args):
             if global_step >= args.max_train_steps:
                 break
 
-        if accelerator.is_main_process:
-            images = []
-
-            if args.validation_prompt is not None and global_step % args.validation_steps == 0:
-                logger.info(
-                    f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
-                    f" {args.validation_prompt}."
-                )
-                # create pipeline
-                pipeline = DiffusionPipeline.from_pretrained(
-                    args.pretrained_model_name_or_path,
-                    unet=accelerator.unwrap_model(unet),
-                    text_encoder=accelerator.unwrap_model(text_encoder),
-                    tokenizer=tokenizer,
-                    revision=args.revision,
-                    torch_dtype=weight_dtype,
-                )
-                pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
-                pipeline = pipeline.to(accelerator.device)
-                pipeline.set_progress_bar_config(disable=True)
-
-                # run inference
-                generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
-                images = [
-                    pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[0]
-                    for _ in range(args.num_validation_images)
-                ]
-
-                for tracker in accelerator.trackers:
-                    if tracker.name == "tensorboard":
-                        np_images = np.stack([np.asarray(img) for img in images])
-                        tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
-                    if tracker.name == "wandb":
-                        tracker.log(
-                            {
-                                "validation": [
-                                    wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
-                                    for i, image in enumerate(images)
-                                ]
-                            }
-                        )
-
-                del pipeline
-                torch.cuda.empty_cache()
+            if accelerator.is_main_process:
+                images = []
+
+                if args.validation_prompt is not None and global_step % args.validation_steps == 0:
+                    logger.info(
+                        f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
+                        f" {args.validation_prompt}."
+                    )
+                    # create pipeline
+                    pipeline = DiffusionPipeline.from_pretrained(
+                        args.pretrained_model_name_or_path,
+                        unet=accelerator.unwrap_model(unet),
+                        text_encoder=accelerator.unwrap_model(text_encoder),
+                        tokenizer=tokenizer,
+                        revision=args.revision,
+                        torch_dtype=weight_dtype,
+                    )
+                    pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
+                    pipeline = pipeline.to(accelerator.device)
+                    pipeline.set_progress_bar_config(disable=True)
+
+                    # run inference
+                    generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
+                    images = [
+                        pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[0]
+                        for _ in range(args.num_validation_images)
+                    ]
+
+                    for tracker in accelerator.trackers:
+                        if tracker.name == "tensorboard":
+                            np_images = np.stack([np.asarray(img) for img in images])
+                            tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
+                        if tracker.name == "wandb":
+                            tracker.log(
+                                {
+                                    "validation": [
+                                        wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
+                                        for i, image in enumerate(images)
+                                    ]
+                                }
+                            )
+
+                    del pipeline
+                    torch.cuda.empty_cache()
 
     # Save the custom diffusion layers
     accelerator.wait_for_everyone()

From 6cfd46f89d82a8913993a0b818d12df5ac5f279a Mon Sep 17 00:00:00 2001
From: sariel <2788787973@qq.com>
Date: Sun, 8 Oct 2023 22:05:40 +0800
Subject: [PATCH 2/3] make style

---
 .../custom_diffusion/train_custom_diffusion.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/examples/custom_diffusion/train_custom_diffusion.py b/examples/custom_diffusion/train_custom_diffusion.py
index 387e21defd32..4773446a615b 100644
--- a/examples/custom_diffusion/train_custom_diffusion.py
+++ b/examples/custom_diffusion/train_custom_diffusion.py
@@ -207,7 +207,7 @@ def __init__(
                     with open(concept["class_prompt"], "r") as f:
                         class_prompt = f.read().splitlines()
 
-                class_img_path = [(x, y) for (x, y) in zip(class_images_path, class_prompt)]
+                class_img_path = list(zip(class_images_path, class_prompt))
                 self.class_images_path.extend(class_img_path[:num_class_images])
 
         random.shuffle(self.instance_images_path)
@@ -1216,7 +1216,7 @@ def main(args):
 
             if accelerator.is_main_process:
                 images = []
-
+
                 if args.validation_prompt is not None and global_step % args.validation_steps == 0:
                     logger.info(
                         f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
@@ -1234,14 +1234,16 @@ def main(args):
                     pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
                     pipeline = pipeline.to(accelerator.device)
                     pipeline.set_progress_bar_config(disable=True)
-
+
                     # run inference
                     generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
                     images = [
-                        pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[0]
+                        pipeline(args.validation_prompt, num_inference_steps=25, generator=generator, eta=1.0).images[
+                            0
+                        ]
                         for _ in range(args.num_validation_images)
                     ]
-
+
                     for tracker in accelerator.trackers:
                         if tracker.name == "tensorboard":
                             np_images = np.stack([np.asarray(img) for img in images])
                             tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
                         if tracker.name == "wandb":
                             tracker.log(
                                 {
                                     "validation": [
                                         wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
                                         for i, image in enumerate(images)
                                     ]
                                 }
                             )
-
+
                     del pipeline
                     torch.cuda.empty_cache()

From ce7147daebf6be731b8215793ea99abdf5e03bec Mon Sep 17 00:00:00 2001
From: sayakpaul
Date: Mon, 9 Oct 2023 09:33:50 +0200
Subject: [PATCH 3/3] Empty-Commit
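
For context, a minimal sketch (not part of the patches) of the control flow PATCH 1/3 produces: validation is now checked inside the per-step loop and gated on global_step % args.validation_steps, instead of being evaluated only once per epoch. The train() function and its parameters below are illustrative placeholders; only the gating pattern mirrors train_custom_diffusion.py.

    # Illustrative sketch only -- the real script drives this loop with Accelerate,
    # a DataLoader, and a diffusers pipeline for the validation images.
    def train(num_epochs, steps_per_epoch, max_train_steps, validation_steps, is_main_process=True):
        global_step = 0
        for _ in range(num_epochs):
            for _ in range(steps_per_epoch):
                global_step += 1  # one optimizer step per iteration in this sketch

                if global_step >= max_train_steps:
                    break

                # The fix: this check now runs every step, so --validation_steps is
                # honored rather than being tested once at the end of each epoch.
                if is_main_process and global_step % validation_steps == 0:
                    print(f"running validation at step {global_step}")

    if __name__ == "__main__":
        train(num_epochs=2, steps_per_epoch=10, max_train_steps=15, validation_steps=5)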