diff --git a/examples/community/stable_diffusion_reference.py b/examples/community/stable_diffusion_reference.py
index 924548b35ca3..1b02831ad19c 100644
--- a/examples/community/stable_diffusion_reference.py
+++ b/examples/community/stable_diffusion_reference.py
@@ -31,7 +31,7 @@
                torch_dtype=torch.float16
                ).to('cuda:0')
 
-        >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config)
+        >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
         >>> result_img = pipe(ref_image=input_image,
               prompt="1girl",
diff --git a/src/diffusers/loaders/lora.py b/src/diffusers/loaders/lora.py
index 55ccdda94973..0870f3a67a3d 100644
--- a/src/diffusers/loaders/lora.py
+++ b/src/diffusers/loaders/lora.py
@@ -1192,7 +1192,7 @@ def set_lora_device(self, adapter_names: List[str], device: Union[torch.device,
 class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
     """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""
 
-    # Overrride to properly handle the loading and unloading of the additional text encoder.
+    # Override to properly handle the loading and unloading of the additional text encoder.
     def load_lora_weights(
         self,
         pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
diff --git a/src/diffusers/loaders/textual_inversion.py b/src/diffusers/loaders/textual_inversion.py
index e5aeea488407..aaaf4b68bb5f 100644
--- a/src/diffusers/loaders/textual_inversion.py
+++ b/src/diffusers/loaders/textual_inversion.py
@@ -215,7 +215,7 @@ def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
             embedding = state_dict["string_to_param"]["*"]
         else:
             raise ValueError(
-                f"Loaded state dictonary is incorrect: {state_dict}. \n\n"
+                f"Loaded state dictionary is incorrect: {state_dict}. \n\n"
                 "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
                 " input key."
             )
diff --git a/src/diffusers/pipelines/pipeline_utils.py b/src/diffusers/pipelines/pipeline_utils.py
index 18a4b5cb346b..c92df24251f4 100644
--- a/src/diffusers/pipelines/pipeline_utils.py
+++ b/src/diffusers/pipelines/pipeline_utils.py
@@ -170,7 +170,7 @@ def is_safetensors_compatible(filenames, variant=None, passed_components=None) -
         sf_filenames.add(os.path.normpath(filename))
 
     for filename in pt_filenames:
-        # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extention = '.bam'
+        # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam'
         path, filename = os.path.split(filename)
         filename, extension = os.path.splitext(filename)
 
@@ -375,7 +375,7 @@ def _get_pipeline_class(
 
     if repo_id is not None and hub_revision is not None:
         # if we load the pipeline code from the Hub
-        # make sure to overwrite the `revison`
+        # make sure to overwrite the `revision`
         revision = hub_revision
 
     return get_class_from_dynamic_module(
@@ -451,7 +451,7 @@ def load_sub_model(
     )
 
     load_method_name = None
-    # retrive load method name
+    # retrieve load method name
     for class_name, class_candidate in class_candidates.items():
         if class_candidate is not None and issubclass(class_obj, class_candidate):
             load_method_name = importable_classes[class_name][1]
@@ -1897,7 +1897,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
         else:
             # 2. we forced `local_files_only=True` when `model_info` failed
             raise EnvironmentError(
-                f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occured"
+                f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred"
                 " while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace"
                 " above."
             ) from model_info_call_error