
Merge pull request #243 from modelscope/flux-lora
support preset lora
Artiprocher authored Oct 21, 2024
2 parents ed71184 + a403cb0 commit 72ed76e
Showing 1 changed file with 10 additions and 1 deletion.
examples/train/flux/train_flux_lora.py: 10 additions & 1 deletion
@@ -8,7 +8,7 @@
 class LightningModel(LightningModelForT2ILoRA):
     def __init__(
         self,
-        torch_dtype=torch.float16, pretrained_weights=[],
+        torch_dtype=torch.float16, pretrained_weights=[], preset_lora_path=None,
         learning_rate=1e-4, use_gradient_checkpointing=True,
         lora_rank=4, lora_alpha=4, lora_target_modules="to_q,to_k,to_v,to_out", init_lora_weights="kaiming",
         state_dict_converter=None, quantize = None
@@ -21,6 +21,8 @@ def __init__(
         else:
             model_manager.load_models(pretrained_weights[1:])
             model_manager.load_model(pretrained_weights[0], torch_dtype=quantize)
+        if preset_lora_path is not None:
+            model_manager.load_lora(preset_lora_path)

         self.pipe = FluxImagePipeline.from_model_manager(model_manager)
@@ -82,6 +84,12 @@ def parse_args():
         choices=["float8_e4m3fn"],
         help="Whether to use quantization when training the model, and in which format.",
     )
+    parser.add_argument(
+        "--preset_lora_path",
+        type=str,
+        default=None,
+        help="Preset LoRA path.",
+    )
     parser = add_general_parsers(parser)
     args = parser.parse_args()
     return args
@@ -92,6 +100,7 @@ def parse_args():
     model = LightningModel(
         torch_dtype={"32": torch.float32, "bf16": torch.bfloat16}.get(args.precision, torch.float16),
         pretrained_weights=[args.pretrained_dit_path, args.pretrained_text_encoder_path, args.pretrained_text_encoder_2_path, args.pretrained_vae_path],
+        preset_lora_path=args.preset_lora_path,
         learning_rate=args.learning_rate,
         use_gradient_checkpointing=args.use_gradient_checkpointing,
         lora_rank=args.lora_rank,
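Taken together, the diff threads one new option through the script: --preset_lora_path is parsed, passed into LightningModel, and, when set, the referenced LoRA is loaded into the model manager before the FluxImagePipeline is built. Below is a minimal sketch of the updated constructor call as it might be used inside the script's __main__ block; LightningModel is the class defined in train_flux_lora.py, and all file paths are hypothetical placeholders, not values from this commit.

# Minimal sketch (not from the commit); paths are hypothetical placeholders.
import torch

model = LightningModel(
    torch_dtype=torch.bfloat16,
    pretrained_weights=[
        "models/flux/dit.safetensors",           # --pretrained_dit_path
        "models/flux/text_encoder.safetensors",  # --pretrained_text_encoder_path
        "models/flux/text_encoder_2",            # --pretrained_text_encoder_2_path
        "models/flux/vae.safetensors",           # --pretrained_vae_path
    ],
    preset_lora_path="models/lora/my_flux_lora.safetensors",  # new: loaded via model_manager.load_lora(...) before the pipeline is built
    learning_rate=1e-4,
    use_gradient_checkpointing=True,
    lora_rank=4,
    lora_alpha=4,
)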
