Fixed examples test error
Signed-off-by: Cheng, Penghui <penghui.cheng@intel.com>
PenghuiCheng committed Dec 21, 2022
1 parent 642c2a5 commit cd40ec3
Showing 6 changed files with 8 additions and 30 deletions.
@@ -561,7 +561,7 @@ def eval_func_for_nc(model_tuned):
     q_model = quantization.fit(model,
                                conf,
                                calib_dataloader=trainer.get_eval_dataloader(),
-                               calib_func=eval_func_for_nc)
+                               eval_func=eval_func_for_nc)
     q_model.save(training_args.output_dir)
     exit(0)
 
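For reference, the call pattern after this fix looks roughly like the sketch below. It is not code from the repository: PostTrainingQuantConfig, model, eval_dataloader, eval_func, and output_dir are stand-ins for whatever the example actually defines; the point is that the evaluation function is now passed as eval_func, while calib_dataloader carries the calibration data.

from neural_compressor import quantization
from neural_compressor.config import PostTrainingQuantConfig

conf = PostTrainingQuantConfig()                 # default post-training quantization settings
q_model = quantization.fit(model,                # assumed FP32 model
                           conf,
                           calib_dataloader=eval_dataloader,  # assumed calibration data
                           eval_func=eval_func)               # accuracy hook, previously passed as calib_func
q_model.save(output_dir)                         # assumed output path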

@@ -907,10 +907,9 @@ def get_logits(teacher_model, train_dataset):
 
     confs = []
     if args.do_prune:
-        # Pruning!
-        from neural_compressor.config import Pruner, PruningConfig
-        pruner = Pruner(prune_type="pattern_lock")
-        p_conf = PruningConfig(pruners=[pruner])
+        from neural_compressor.config import WeightPruningConfig
+        import pdb;pdb.set_trace()
+        p_conf = WeightPruningConfig(pruning_type="pattern_lock")
         confs.append(p_conf)
 
     if args.do_quantization:
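
In isolation, the new pruning configuration used above can be constructed roughly as follows. Only the WeightPruningConfig import and pruning_type="pattern_lock" come from the diff; the explanatory comment and the confs list mirror the example's structure and are otherwise assumptions.

from neural_compressor.config import WeightPruningConfig

# "pattern_lock" keeps the sparsity pattern of an already-pruned model fixed
# while it is fine-tuned, rather than pruning it further.
p_conf = WeightPruningConfig(pruning_type="pattern_lock")
confs = [p_conf]   # later handed to the compression flow, optionally alongside a quantization config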

@@ -534,7 +534,7 @@ def main():
             "You are instantiating a new tokenizer from scratch. This is not supported by this script."
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )
 
     if args.distill_loss_weight > 0:
         teacher_path = args.teacher_model_name_or_path
         if teacher_path is None:
@@ -988,11 +988,9 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
         start_step=pruning_start,
         end_step=pruning_end
     )
-    # pruner = Pruning(config)
-    # pruner.model = model
-    # pruner.on_train_begin()
+    compression_manager = prepare_compression(model=model, confs=config)
     compression_manager.callbacks.on_train_begin()
     model = compression_manager.model
 
     for epoch in range(starting_epoch, args.num_train_epochs):
         model.train()
@@ -1019,10 +1017,8 @@ def create_and_fill_np_array(start_or_end_logits, dataset, max_len):
                 loss = loss / args.gradient_accumulation_steps
             accelerator.backward(loss)
             if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
-                # pruner.on_before_optimizer_step()
                 compression_manager.callbacks.on_before_optimizer_step()
                 optimizer.step()
-                # pruner.on_after_optimizer_step()
                 compression_manager.callbacks.on_after_optimizer_step()
                 lr_scheduler.step()
                 optimizer.zero_grad()
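
Taken together, the hunks above settle on the callback-based pruning loop sketched below. This is an illustration, not the example's code: the pruning criterion, step range, loss computation, and loop variables (model, optimizer, lr_scheduler, train_dataloader, num_train_epochs) are placeholders; only prepare_compression, compression_manager.model, and the callback names appear in the diff.

from neural_compressor.config import WeightPruningConfig
from neural_compressor.training import prepare_compression

config = WeightPruningConfig(pruning_type="magnitude",     # placeholder criterion
                             start_step=0, end_step=1000)  # placeholder schedule
compression_manager = prepare_compression(model=model, confs=config)
compression_manager.callbacks.on_train_begin()
model = compression_manager.model    # continue training the manager-wrapped model

for epoch in range(num_train_epochs):
    model.train()
    for step, batch in enumerate(train_dataloader):
        loss = model(**batch).loss                                  # placeholder forward/loss
        loss.backward()
        compression_manager.callbacks.on_before_optimizer_step()   # update pruning masks
        optimizer.step()
        compression_manager.callbacks.on_after_optimizer_step()    # re-apply masks to updated weights
        lr_scheduler.step()
        optimizer.zero_grad()

compression_manager.callbacks.on_train_end()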

@@ -642,7 +642,7 @@ def eval_func(model):
     q_model = quantization.fit(model,
                                conf,
                                calib_dataloader=eval_dataloader,
-                               calib_func=eval_func)
+                               eval_func=eval_func)
     q_model.save(training_args.output_dir)
     return
 

@@ -643,7 +643,7 @@ def eval_func(model):
     q_model = quantization.fit(model,
                                conf,
                                calib_dataloader=eval_dataloader,
-                               calib_func=eval_func)
+                               eval_func=eval_func)
     q_model.save(training_args.output_dir)
     return
 

@@ -62,23 +62,6 @@ function run_tuning {
             --output_dir $tuned_checkpoint \
             ${extra_cmd}
     fi
-    if [[ "${topology}" == "bert_large_1_10_ipex" ]]; then
-        pip install transformers==3.0.2
-        python run_qa_1_10.py \
-            --model_type bert \
-            --model_name_or_path $input_model \
-            --do_lower_case \
-            --predict_file $dataset_location \
-            --tokenizer_name $tokenizer_name \
-            --do_eval \
-            --max_seq_length 384 \
-            --doc_stride 128 \
-            --no_cuda \
-            --tune \
-            --output_dir $tuned_checkpoint \
-            --int8 \
-            --int8_fp32
-    fi
 }
 
 main "$@"
