
Commit

fix(api): avoid loading encoder twice when using LoRAs and inversions together
ssube committed Mar 18, 2023
1 parent 9f9b73b commit af326a7
Showing 1 changed file with 8 additions and 6 deletions.
14 changes: 8 additions & 6 deletions api/onnx_web/diffusers/load.py
@@ -236,14 +236,16 @@ def load_pipeline(
             list(zip(inversion_models, inversion_weights, inversion_names)),
         )
 
-        components["tokenizer"] = tokenizer
         # should be pretty small and should not need external data
-        components["text_encoder"] = OnnxRuntimeModel(
-            OnnxRuntimeModel.load_model(
-                text_encoder.SerializeToString(),
-                provider=device.ort_provider(),
+        if loras is None or len(loras) == 0:
+            components["text_encoder"] = OnnxRuntimeModel(
+                OnnxRuntimeModel.load_model(
+                    text_encoder.SerializeToString(),
+                    provider=device.ort_provider(),
+                )
             )
-        )
+
+        components["tokenizer"] = tokenizer
 
     # test LoRA blending
     if loras is not None and len(loras) > 0:
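The reasoning behind the change: when LoRA weights are requested, the LoRA-blending branch further down builds its own blended text encoder, so loading the plain ONNX encoder up front was wasted work and the encoder effectively got loaded twice. The guard skips the plain load whenever LoRAs will replace it anyway. Below is a minimal sketch of that pattern, using simplified stand-ins rather than onnx-web's real internals; load_plain_encoder and blend_lora_encoder are hypothetical helpers for illustration only, not functions from the project.

# Illustrative sketch of the guard introduced by this commit.
# load_plain_encoder / blend_lora_encoder are hypothetical stand-ins for the
# OnnxRuntimeModel load and the LoRA-blending step inside load_pipeline.

from typing import Optional


def load_plain_encoder() -> str:
    # stand-in for OnnxRuntimeModel.load_model(text_encoder.SerializeToString(), ...)
    print("loading plain text encoder")
    return "plain-encoder"


def blend_lora_encoder(loras: list) -> str:
    # stand-in for the LoRA branch, which builds its own blended encoder
    print(f"blending text encoder with {len(loras)} LoRA(s)")
    return "blended-encoder"


def build_components(loras: Optional[list]) -> dict:
    components = {}

    # Before this commit the plain encoder was loaded unconditionally here and
    # then replaced by the blended one below, so it was loaded twice.
    if loras is None or len(loras) == 0:
        components["text_encoder"] = load_plain_encoder()

    components["tokenizer"] = "tokenizer"

    if loras is not None and len(loras) > 0:
        components["text_encoder"] = blend_lora_encoder(loras)

    return components


if __name__ == "__main__":
    build_components(loras=None)                         # loads the plain encoder once
    build_components(loras=["style-lora.safetensors"])   # skips the plain load entirely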
