Commit d8e8a44

make style
1 parent 4790104 commit d8e8a44

52 files changed: +207 additions, -356 deletions


src/diffusers/guiders/guider_utils.py

Lines changed: 4 additions & 5 deletions
@@ -247,11 +247,10 @@ def from_pretrained(
The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
allowed by Git.

- > [!TIP]
- > To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
- > auth login`. You can also activate the special
- > ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
- > firewalled environment.
+ > [!TIP] > To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in
+ with `hf > auth login`. You can also activate the special >
+ ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a >
+ firewalled environment.

"""
config, kwargs, commit_hash = cls.load_config(
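
The tip being re-wrapped above documents the `revision` argument and the gated-model login flow for `from_pretrained`. As a rough sketch of how those options are used in practice (the repo id below is illustrative, not taken from this diff):

```python
# For private or gated repos, authenticate once with the CLI: `hf auth login`.
# For firewalled environments, set HF_HUB_OFFLINE=1 so only the local cache is used.
from diffusers import UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    subfolder="unet",
    revision="main",  # branch name, tag name, or commit id, as documented above
)
```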

src/diffusers/loaders/lora_base.py

Lines changed: 2 additions & 4 deletions
@@ -544,8 +544,7 @@ def fuse_lora(
r"""
Fuses the LoRA parameters into the original parameters of the corresponding blocks.

- > [!WARNING]
- > This is an experimental API.
+ > [!WARNING] > This is an experimental API.

Args:
components: (`List[str]`): List of LoRA-injectable components to fuse the LoRAs into.
@@ -625,8 +624,7 @@ def unfuse_lora(self, components: List[str] = [], **kwargs):
Reverses the effect of
[`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraBaseMixin.fuse_lora).

- > [!WARNING]
- > This is an experimental API.
+ > [!WARNING] > This is an experimental API.

Args:
components (`List[str]`): List of LoRA-injectable components to unfuse LoRA from.
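
For context on the warning reflowed here, `fuse_lora`/`unfuse_lora` are called on a pipeline after LoRA weights have been loaded. A minimal sketch, with an illustrative base checkpoint and adapter id:

```python
import torch
from diffusers import DiffusionPipeline

pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Load a LoRA adapter, then fold its weights into the base parameters.
pipe.load_lora_weights("ostris/super-cereal-sdxl-lora")
pipe.fuse_lora(lora_scale=0.7)

image = pipe("a bowl of cereal on the moon").images[0]

# Reverse the fusion to recover the original parameters (experimental API).
pipe.unfuse_lora()
```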

src/diffusers/loaders/lora_pipeline.py

Lines changed: 64 additions & 128 deletions
Large diffs are not rendered by default.

src/diffusers/models/attention.py

Lines changed: 1 addition & 2 deletions
@@ -111,8 +111,7 @@ def fuse_qkv_projections(self):
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.
"""
for module in self.modules():
if isinstance(module, AttentionModuleMixin):
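
The `fuse_qkv_projections`/`unfuse_qkv_projections` pair referenced in this docstring is normally toggled around inference. A hedged sketch, assuming a pipeline class that exposes the method (model id illustrative):

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Fuse query/key/value projections into single matmuls (experimental API).
pipe.fuse_qkv_projections()
image = pipe("an astronaut riding a horse on mars").images[0]

# Restore the separate q/k/v projections afterwards.
pipe.unfuse_qkv_projections()
```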

src/diffusers/models/attention_processor.py

Lines changed: 1 addition & 2 deletions
@@ -3669,8 +3669,7 @@ class FusedAttnProcessor2_0:
fused projection layers. For self-attention modules, all projection matrices (i.e., query, key, value) are fused.
For cross-attention modules, key and value projection matrices are fused.

- > [!WARNING]
- > This API is currently 🧪 experimental in nature and can change in future.
+ > [!WARNING] > This API is currently 🧪 experimental in nature and can change in future.
"""

def __init__(self):
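
`FusedAttnProcessor2_0` is not normally instantiated by hand; it is installed when QKV projections are fused. A rough illustration of that relationship (assuming an SDXL-style UNet; the repo id is illustrative):

```python
from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import FusedAttnProcessor2_0

unet = UNet2DConditionModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
)
unet.fuse_qkv_projections()

# After fusing, the attention processors should be the fused SDPA variant.
print(all(isinstance(p, FusedAttnProcessor2_0) for p in unet.attn_processors.values()))
```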

src/diffusers/models/auto_model.py

Lines changed: 4 additions & 5 deletions
@@ -118,11 +118,10 @@ def from_pretrained(cls, pretrained_model_or_path: Optional[Union[str, os.PathLi
trust_remote_cocde (`bool`, *optional*, defaults to `False`):
Whether to trust remote code

- > [!TIP]
- > To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with `hf
- > auth login`. You can also activate the special
- > ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a
- > firewalled environment.
+ > [!TIP] > To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in
+ with `hf > auth login`. You can also activate the special >
+ ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use this method in a >
+ firewalled environment.

Example:
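
`AutoModel.from_pretrained` resolves the concrete model class from the checkpoint's config, and `trust_remote_code` only matters for repos that ship their own modeling code. A hedged usage sketch (repo ids illustrative):

```python
from diffusers import AutoModel

# The concrete class (UNet, transformer, ...) is inferred from the checkpoint config.
unet = AutoModel.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
)

# Only needed when the repository defines a custom model class:
# custom = AutoModel.from_pretrained("some-user/custom-model", trust_remote_code=True)
```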

src/diffusers/models/autoencoders/autoencoder_kl.py

Lines changed: 2 additions & 4 deletions
@@ -532,8 +532,7 @@ def fuse_qkv_projections(self):
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.
"""
self.original_attn_processors = None

@@ -553,8 +552,7 @@ def fuse_qkv_projections(self):
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.

"""
if self.original_attn_processors is not None:

src/diffusers/models/controlnets/controlnet_sd3.py

Lines changed: 2 additions & 4 deletions
@@ -270,8 +270,7 @@ def fuse_qkv_projections(self):
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.
"""
self.original_attn_processors = None

@@ -291,8 +290,7 @@ def fuse_qkv_projections(self):
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.

"""
if self.original_attn_processors is not None:

src/diffusers/models/controlnets/controlnet_xs.py

Lines changed: 2 additions & 4 deletions
@@ -980,8 +980,7 @@ def fuse_qkv_projections(self):
Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
are fused. For cross-attention modules, key and value projection matrices are fused.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.
"""
self.original_attn_processors = None

@@ -1001,8 +1000,7 @@ def fuse_qkv_projections(self):
def unfuse_qkv_projections(self):
"""Disables the fused QKV projection if enabled.

- > [!WARNING]
- > This API is 🧪 experimental.
+ > [!WARNING] > This API is 🧪 experimental.

"""
if self.original_attn_processors is not None:

src/diffusers/models/modeling_flax_utils.py

Lines changed: 3 additions & 6 deletions
@@ -227,12 +227,9 @@ def from_pretrained(
This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
specified, all the computation will be performed with the given `dtype`.

- > [!TIP]
- > This only specifies the dtype of the *computation* and does not influence the dtype of model
- > parameters.
- >
- > If you wish to change the dtype of the model parameters, see [`~FlaxModelMixin.to_fp16`] and
- > [`~FlaxModelMixin.to_bf16`].
+ > [!TIP] > This only specifies the dtype of the *computation* and does not influence the dtype of model
+ > parameters. > > If you wish to change the dtype of the model parameters, see
+ [`~FlaxModelMixin.to_fp16`] and > [`~FlaxModelMixin.to_bf16`].

model_args (sequence of positional arguments, *optional*):
All remaining positional arguments are passed to the underlying model's `__init__` method.
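
The tip reflowed above separates computation dtype (the `dtype` argument) from parameter dtype (`to_fp16`/`to_bf16`). A minimal sketch, assuming a checkpoint that ships Flax weights (repo id illustrative):

```python
import jax.numpy as jnp
from diffusers import FlaxUNet2DConditionModel

# `dtype` controls only the computation dtype; loaded parameters keep their own dtype.
unet, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16
)

# To change the parameter dtype itself, use the explicit casting helpers.
params_fp16 = unet.to_fp16(params)
params_bf16 = unet.to_bf16(params)
```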
