examples/community/stable_diffusion_reference.py (2 changes: 1 addition & 1 deletion)
@@ -31,7 +31,7 @@
 torch_dtype=torch.float16
 ).to('cuda:0')
 
->>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config)
+>>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
 
 >>> result_img = pipe(ref_image=input_image,
 prompt="1girl",
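Context for the fix above: the snippet is from the pipeline's docstring example, and the old line pointed at a `pipe_controlnet` object rather than the `pipe` the example actually builds. A minimal, self-contained sketch of the corrected pattern (the model id and device are illustrative, not taken from this diff):

import torch
from diffusers import StableDiffusionPipeline, UniPCMultistepScheduler

# Load any Stable Diffusion pipeline; the repo id here is only an example.
pipe = StableDiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
).to("cuda:0")

# The fixed line: rebuild the scheduler from this pipeline's own config,
# not from a separate ControlNet pipeline's config.
pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)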
src/diffusers/loaders/lora.py (2 changes: 1 addition & 1 deletion)
@@ -1192,7 +1192,7 @@ def set_lora_device(self, adapter_names: List[str], device: Union[torch.device,
 class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
     """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""
 
-    # Overrride to properly handle the loading and unloading of the additional text encoder.
+    # Override to properly handle the loading and unloading of the additional text encoder.
     def load_lora_weights(
         self,
         pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
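For orientation, the overridden method is the public entry point for attaching LoRA weights to an SDXL pipeline, including its second text encoder. A hedged usage sketch (the LoRA repo id is hypothetical):

import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
# The override routes LoRA layers into the UNet and both text encoders.
pipe.load_lora_weights("some-user/some-sdxl-lora")  # hypothetical repo id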
src/diffusers/loaders/textual_inversion.py (2 changes: 1 addition & 1 deletion)
@@ -215,7 +215,7 @@ def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
             embedding = state_dict["string_to_param"]["*"]
         else:
             raise ValueError(
-                f"Loaded state dictonary is incorrect: {state_dict}. \n\n"
+                f"Loaded state dictionary is incorrect: {state_dict}. \n\n"
                 "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
                 " input key."
             )
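The two accepted layouts named in that error message look roughly like this (a sketch; token names and tensor shapes are illustrative):

import torch

# Layout 1: a single key mapping the placeholder token to its embedding.
diffusers_style = {"<my-token>": torch.zeros(768)}

# Layout 2: the embedding nested under `string_to_param` -> "*",
# as read by the `state_dict["string_to_param"]["*"]` branch above.
a1111_style = {"string_to_param": {"*": torch.zeros(1, 768)}}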
src/diffusers/pipelines/pipeline_utils.py (8 changes: 4 additions & 4 deletions)
@@ -170,7 +170,7 @@ def is_safetensors_compatible(filenames, variant=None, passed_components=None) -
         sf_filenames.add(os.path.normpath(filename))
 
     for filename in pt_filenames:
-        # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extention = '.bam'
+        # filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam'
         path, filename = os.path.split(filename)
         filename, extension = os.path.splitext(filename)
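The fixed comment describes a standard two-step split; a quick runnable illustration:

import os

path, name = os.path.split("foo/bar/baz.bam")   # ('foo/bar', 'baz.bam')
name, extension = os.path.splitext(name)        # ('baz', '.bam')
print(path, name, extension)                    # foo/bar baz .bam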

@@ -375,7 +375,7 @@ def _get_pipeline_class(
 
     if repo_id is not None and hub_revision is not None:
         # if we load the pipeline code from the Hub
-        # make sure to overwrite the `revison`
+        # make sure to overwrite the `revision`
         revision = hub_revision
 
     return get_class_from_dynamic_module(
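This branch only runs when pipeline code itself is fetched from the Hub; a hedged sketch of the kind of call that reaches it (both repo ids are illustrative):

from diffusers import DiffusionPipeline

# When `custom_pipeline` points at a Hub repo, the pipeline class is loaded
# from that repo's code, and the hunk above makes the resolved code revision
# override any user-passed `revision`.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline",  # illustrative
)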
@@ -451,7 +451,7 @@ def load_sub_model(
     )
 
     load_method_name = None
-    # retrive load method name
+    # retrieve load method name
     for class_name, class_candidate in class_candidates.items():
         if class_candidate is not None and issubclass(class_obj, class_candidate):
             load_method_name = importable_classes[class_name][1]
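The loop above implements a simple dispatch table: find the candidate base class the sub-model inherits from, then look up the name of its loading method. A self-contained toy version of the pattern (all names illustrative):

class ModelMixin:
    @classmethod
    def from_pretrained(cls, path):
        return f"{cls.__name__} loaded from {path}"

class UNet(ModelMixin):
    pass

importable_classes = {"ModelMixin": ("ModelMixin", "from_pretrained")}
class_candidates = {"ModelMixin": ModelMixin}

class_obj, load_method_name = UNet, None
for class_name, class_candidate in class_candidates.items():
    if class_candidate is not None and issubclass(class_obj, class_candidate):
        load_method_name = importable_classes[class_name][1]

# The retrieved name is then used to fetch and call the actual loader.
print(getattr(class_obj, load_method_name)("./cached"))  # UNet loaded from ./cached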
@@ -1897,7 +1897,7 @@ def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
         else:
             # 2. we forced `local_files_only=True` when `model_info` failed
             raise EnvironmentError(
-                f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occured"
+                f"Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred"
                 " while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace"
                 " above."
             ) from model_info_call_error
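For context, this error surfaces when the Hub metadata request fails (for example, no network) and the model is not already in the local cache; a hedged reproduction sketch (the repo id is hypothetical):

from diffusers import DiffusionPipeline

try:
    # With no network and an uncached model, download() falls through to
    # the EnvironmentError raised in the hunk above.
    DiffusionPipeline.download("some-user/never-downloaded-model")  # hypothetical id
except EnvironmentError as err:
    print(err)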