diff --git a/examples/text_to_image/README.md b/examples/text_to_image/README.md
index a56cccbcf5d7..f2931d3f347e 100644
--- a/examples/text_to_image/README.md
+++ b/examples/text_to_image/README.md
@@ -4,7 +4,7 @@ The `train_text_to_image.py` script shows how to fine-tune stable diffusion mode
 
 ___Note___:
 
-___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparamters to get the best result on your dataset.___
+___This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset.___
 
 
 ## Running locally with PyTorch
diff --git a/examples/text_to_image/README_sdxl.md b/examples/text_to_image/README_sdxl.md
index 0d35b2a8ab9d..349feef5008e 100644
--- a/examples/text_to_image/README_sdxl.md
+++ b/examples/text_to_image/README_sdxl.md
@@ -2,7 +2,7 @@
 
 The `train_text_to_image_sdxl.py` script shows how to fine-tune Stable Diffusion XL (SDXL) on your own dataset.
 
-🚨 This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparamters to get the best result on your dataset. 🚨
+🚨 This script is experimental. The script fine-tunes the whole model and often times the model overfits and runs into issues like catastrophic forgetting. It's recommended to try different hyperparameters to get the best result on your dataset. 🚨
 
 ## Running locally with PyTorch
 
@@ -238,8 +238,8 @@ accelerate launch --config_file $ACCELERATE_CONFIG_FILE train_text_to_image_lor
   --validation_epochs=20 \
   --seed=1234 \
   --output_dir="sd-pokemon-model-lora-sdxl" \
-  --validation_prompt="cute dragon creature" 
- 
+  --validation_prompt="cute dragon creature"
+
 ```
diff --git a/examples/text_to_image/test_text_to_image.py b/examples/text_to_image/test_text_to_image.py
index 82c9122808aa..6231a89b1d1d 100644
--- a/examples/text_to_image/test_text_to_image.py
+++ b/examples/text_to_image/test_text_to_image.py
@@ -1,5 +1,6 @@
+#!/usr/bin/env python
 # coding=utf-8
-# Copyright 2024 HuggingFace Inc.
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/examples/text_to_image/test_text_to_image_lora.py b/examples/text_to_image/test_text_to_image_lora.py
index 57cf01f43638..4604b9f5210c 100644
--- a/examples/text_to_image/test_text_to_image_lora.py
+++ b/examples/text_to_image/test_text_to_image_lora.py
@@ -1,5 +1,6 @@
+#!/usr/bin/env python
 # coding=utf-8
-# Copyright 2024 HuggingFace Inc.
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/examples/text_to_image/train_text_to_image.py b/examples/text_to_image/train_text_to_image.py
index 6fb8b17944eb..46516c9198ec 100644
--- a/examples/text_to_image/train_text_to_image.py
+++ b/examples/text_to_image/train_text_to_image.py
@@ -12,6 +12,7 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
+# limitations under the License.
 
 import argparse
 import logging
@@ -395,7 +396,7 @@ def parse_args():
         "--prediction_type",
         type=str,
         default=None,
-        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
+        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
     )
     parser.add_argument(
         "--hub_model_id",
@@ -635,7 +636,7 @@ def load_model_hook(models, input_dir):
                 ema_unet.to(accelerator.device)
                 del load_model
 
-            for i in range(len(models)):
+            for _ in range(len(models)):
                 # pop models so that they are not loaded again
                 model = models.pop()
 
@@ -810,7 +811,7 @@ def collate_fn(examples):
     if args.use_ema:
         ema_unet.to(accelerator.device)
 
-    # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+    # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
     # as these weights are only used for inference, keeping weights in full precision is not required.
     weight_dtype = torch.float32
     if accelerator.mixed_precision == "fp16":
diff --git a/examples/text_to_image/train_text_to_image_flax.py b/examples/text_to_image/train_text_to_image_flax.py
index ac6476fb0386..d1dc31e06403 100644
--- a/examples/text_to_image/train_text_to_image_flax.py
+++ b/examples/text_to_image/train_text_to_image_flax.py
@@ -1,3 +1,19 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright 2024 The HuggingFace Inc. team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 import argparse
 import logging
 import math
diff --git a/examples/text_to_image/train_text_to_image_lora.py b/examples/text_to_image/train_text_to_image_lora.py
index 47e67f695b08..39590fa8666b 100644
--- a/examples/text_to_image/train_text_to_image_lora.py
+++ b/examples/text_to_image/train_text_to_image_lora.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # coding=utf-8
 # Copyright 2024 The HuggingFace Inc. team. All rights reserved.
 #
@@ -293,7 +294,7 @@ def parse_args():
         "--prediction_type",
         type=str,
         default=None,
-        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
+        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
     )
     parser.add_argument(
         "--hub_model_id",
@@ -454,7 +455,7 @@ def main():
     vae.requires_grad_(False)
     text_encoder.requires_grad_(False)
 
-    # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+    # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
     # as these weights are only used for inference, keeping weights in full precision is not required.
     weight_dtype = torch.float32
     if accelerator.mixed_precision == "fp16":
diff --git a/examples/text_to_image/train_text_to_image_lora_sdxl.py b/examples/text_to_image/train_text_to_image_lora_sdxl.py
index 79bc66288338..be178d36dcde 100644
--- a/examples/text_to_image/train_text_to_image_lora_sdxl.py
+++ b/examples/text_to_image/train_text_to_image_lora_sdxl.py
@@ -370,7 +370,7 @@ def parse_args(input_args=None):
         "--prediction_type",
         type=str,
         default=None,
-        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
+        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
     )
     parser.add_argument(
         "--hub_model_id",
@@ -585,7 +585,7 @@ def main(args):
     text_encoder_two.requires_grad_(False)
     unet.requires_grad_(False)
 
-    # For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision
+    # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
     # as these weights are only used for inference, keeping weights in full precision is not required.
     weight_dtype = torch.float32
     if accelerator.mixed_precision == "fp16":
@@ -648,7 +648,7 @@ def unwrap_model(model):
     def save_model_hook(models, weights, output_dir):
         if accelerator.is_main_process:
             # there are only two options here. Either are just the unet attn processor layers
-            # or there are the unet and text encoder atten layers
+            # or there are the unet and text encoder attn layers
             unet_lora_layers_to_save = None
             text_encoder_one_lora_layers_to_save = None
             text_encoder_two_lora_layers_to_save = None
diff --git a/examples/text_to_image/train_text_to_image_sdxl.py b/examples/text_to_image/train_text_to_image_sdxl.py
index 292e52bca0f8..04f8c3dba417 100644
--- a/examples/text_to_image/train_text_to_image_sdxl.py
+++ b/examples/text_to_image/train_text_to_image_sdxl.py
@@ -419,7 +419,7 @@ def parse_args(input_args=None):
         "--prediction_type",
         type=str,
         default=None,
-        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediciton_type` is chosen.",
+        help="The prediction_type that shall be used for training. Choose between 'epsilon' or 'v_prediction' or leave `None`. If left to `None` the default prediction type of the scheduler: `noise_scheduler.config.prediction_type` is chosen.",
     )
     parser.add_argument(
         "--hub_model_id",
@@ -683,7 +683,7 @@ def main(args):
     # Set unet as trainable.
     unet.train()
 
-    # For mixed precision training we cast all non-trainable weigths to half-precision
+    # For mixed precision training we cast all non-trainable weights to half-precision
     # as these weights are only used for inference, keeping weights in full precision is not required.
     weight_dtype = torch.float32
     if accelerator.mixed_precision == "fp16":
@@ -738,7 +738,7 @@ def load_model_hook(models, input_dir):
             ema_unet.to(accelerator.device)
             del load_model
 
-        for i in range(len(models)):
+        for _ in range(len(models)):
             # pop models so that they are not loaded again
             model = models.pop()
 
@@ -962,7 +962,7 @@ def collate_fn(examples):
     if accelerator.is_main_process:
         accelerator.init_trackers("text2image-fine-tune-sdxl", config=vars(args))
 
-    # Function for unwraping if torch.compile() was used in accelerate.
+    # Function for unwrapping if torch.compile() was used in accelerate.
    def unwrap_model(model):
        model = accelerator.unwrap_model(model)
        model = model._orig_mod if is_compiled_module(model) else model
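
Several hunks above correct the same `--prediction_type` help string. For context, below is a minimal sketch of how that option is typically consumed in these training scripts; the checkpoint name and the stand-in variables (`prediction_type`, `latents`, `noise`, `timesteps`) are illustrative assumptions, not part of this patch.

```python
import torch
from diffusers import DDPMScheduler

# Illustrative sketch only; checkpoint name and stand-in variables are assumptions.
noise_scheduler = DDPMScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
)

prediction_type = None  # stands in for args.prediction_type: "epsilon", "v_prediction", or None
if prediction_type is not None:
    # Override the scheduler default; otherwise noise_scheduler.config.prediction_type is used.
    noise_scheduler.register_to_config(prediction_type=prediction_type)

latents = torch.randn(1, 4, 64, 64)
noise = torch.randn_like(latents)
timesteps = torch.randint(0, noise_scheduler.config.num_train_timesteps, (1,), dtype=torch.long)

# The regression target depends on the prediction type in effect.
if noise_scheduler.config.prediction_type == "epsilon":
    target = noise
elif noise_scheduler.config.prediction_type == "v_prediction":
    target = noise_scheduler.get_velocity(latents, noise, timesteps)
else:
    raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
```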
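The repeated "non-trainable weights" comment fixed in several files refers to the mixed-precision setup these scripts share: frozen modules are cast to fp16/bf16 because they only run inference, while the trainable UNet stays in float32. Below is a minimal sketch of that pattern, assuming a diffusers/accelerate environment; the checkpoint name is an illustrative assumption, not part of this patch.

```python
import torch
from accelerate import Accelerator
from diffusers import AutoencoderKL, UNet2DConditionModel
from transformers import CLIPTextModel

# Illustrative sketch only; the checkpoint name is an assumption.
model_id = "runwayml/stable-diffusion-v1-5"
accelerator = Accelerator(mixed_precision="fp16")

vae = AutoencoderKL.from_pretrained(model_id, subfolder="vae")
text_encoder = CLIPTextModel.from_pretrained(model_id, subfolder="text_encoder")
unet = UNet2DConditionModel.from_pretrained(model_id, subfolder="unet")

# Only the UNet is trained here; freeze everything else.
vae.requires_grad_(False)
text_encoder.requires_grad_(False)

# Cast the non-trainable weights to half precision: they are only used for
# inference, so keeping them in full precision is not required.
weight_dtype = torch.float32
if accelerator.mixed_precision == "fp16":
    weight_dtype = torch.float16
elif accelerator.mixed_precision == "bf16":
    weight_dtype = torch.bfloat16

vae.to(accelerator.device, dtype=weight_dtype)
text_encoder.to(accelerator.device, dtype=weight_dtype)
unet.to(accelerator.device)  # trainable weights stay in float32; accelerate handles autocast
```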