Commit

Merge pull request #37 from McGill-NLP/supervised-training
Supervised training
vaibhavad committed Apr 30, 2024
2 parents c6fe19b + a0eb38b commit edcfa7a
Showing 16 changed files with 1,013 additions and 34 deletions.
3 changes: 2 additions & 1 deletion .gitignore
@@ -3,4 +3,5 @@ dist/
 *.egg-info
 **/__pycache__
 wandb/**
-output/**
+output/**
+cache/**
13 changes: 7 additions & 6 deletions experiments/run_mntp.py
@@ -108,11 +108,10 @@ def initialize_peft(
         bias="none",
         task_type=None,
     )
-    # model organization is MODEL_TYPEBiForMNTP.model -> MODEL_TYPELBiModel, we have to apply PEFT to the inner model
-    peft_model = get_peft_model(model.get_model_for_peft(), config)
+
+    model = get_peft_model(model, config)
     print(f"Model's Lora trainable parameters:")
-    peft_model.print_trainable_parameters()
-    model.set_model_for_peft(peft_model)
+    model.print_trainable_parameters()
     return model
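Taken together, this hunk simplifies initialize_peft so that it wraps whichever module it is handed directly with PEFT, instead of reaching into the wrapper via get_model_for_peft / set_model_for_peft; the caller now decides which (sub)module receives the adapters. The sketch below illustrates that post-change flow end to end. It is a minimal illustration assuming the Hugging Face peft and transformers packages; the checkpoint name and target_modules list are illustrative placeholders, not values taken from this PR.

# Sketch of the post-change flow: initialize_peft() wraps whatever module it
# receives, and the caller chooses which (sub)module gets the LoRA adapters.
# Assumptions: Hugging Face peft + transformers are installed; the checkpoint
# name and target_modules below are illustrative, not taken from this PR.
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM


def initialize_peft(model, lora_r=16, lora_alpha=32, lora_dropout=0.05):
    config = LoraConfig(
        r=lora_r,
        lora_alpha=lora_alpha,
        target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # illustrative
        lora_dropout=lora_dropout,
        bias="none",
        task_type=None,
    )

    # Wrap the module that was passed in and report the adapter parameter count.
    model = get_peft_model(model, config)
    print("Model's Lora trainable parameters:")
    model.print_trainable_parameters()
    return model


if __name__ == "__main__":
    # Illustrative small checkpoint; any decoder-style HF model with the
    # target projection modules above would work the same way.
    base = AutoModelForCausalLM.from_pretrained("HuggingFaceTB/SmolLM-135M")
    peft_model = initialize_peft(base)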


@@ -696,8 +695,10 @@ def main():
         low_cpu_mem_usage=model_args.low_cpu_mem_usage,
         attn_implementation=model_args.attn_implementation,
     )
-    model = initialize_peft(
-        model,
+
+    # model organization is MODEL_TYPEBiForMNTP.model -> MODEL_TYPELBiModel, we have to apply PEFT to the inner model
+    model.model = initialize_peft(
+        model.model,
         lora_r=custom_args.lora_r,
         lora_alpha=2 * custom_args.lora_r,
         lora_dropout=custom_args.lora_dropout,
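This second hunk moves the PEFT application one level up: the trainer still sees the MODEL_TYPEBiForMNTP wrapper (with its MNTP head intact), but the LoRA adapters are attached only to the inner backbone reachable at .model. The toy sketch below illustrates that pattern; the BiForMNTP class here is a hypothetical stand-in for the repository's wrapper, not its actual definition, and only the wrapper.model = get_peft_model(...) line mirrors the change in this PR.

# Toy illustration of applying PEFT to the inner model of a wrapper.
# Assumption: BiForMNTP below is a hypothetical stand-in for the repo's
# MODEL_TYPEBiForMNTP class; the checkpoint name and target_modules are
# illustrative placeholders.
import torch.nn as nn
from peft import LoraConfig, get_peft_model
from transformers import AutoModel


class BiForMNTP(nn.Module):
    """Hypothetical wrapper: keeps its own MNTP head, holds the backbone at .model."""

    def __init__(self, backbone):
        super().__init__()
        self.model = backbone  # inner, backbone-style submodule
        self.lm_head = nn.Linear(
            backbone.config.hidden_size, backbone.config.vocab_size, bias=False
        )

    def forward(self, **inputs):
        hidden = self.model(**inputs).last_hidden_state
        return self.lm_head(hidden)


backbone = AutoModel.from_pretrained("HuggingFaceTB/SmolLM-135M")  # illustrative checkpoint
wrapper = BiForMNTP(backbone)

lora_cfg = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=["q_proj", "v_proj"],  # illustrative
    bias="none",
    task_type=None,
)
# Attach LoRA to the inner backbone only, mirroring
# `model.model = initialize_peft(model.model, ...)` in the hunk above.
wrapper.model = get_peft_model(wrapper.model, lora_cfg)
wrapper.model.print_trainable_parameters()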
