Skip to content
This repository was archived by the owner on Nov 2, 2024. It is now read-only.

Commit 41e4ed3

Browse files
committed Mar 20, 2020
perf(svm): 更新训练参数,调整初始训练集正负样本比例为1:1
1 parent 1195632 commit 41e4ed3

File tree

1 file changed

+7
-5
lines changed

1 file changed

+7
-5
lines changed
 

‎py/linear_svm.py

+7-5
Original file line number | Diff line number | Diff line change
@@ -49,12 +49,12 @@ def load_data(data_root_dir):
4949
if name is 'train':
5050
"""
5151
使用hard negative mining方式
52-
初始正负样本比例为1:3,由于正样本数远小于负样本,所以以正样本数为基准,在负样本集中随机提取3倍的负样本作为初始负样本集
52+
初始正负样本比例为1:1。由于正样本数远小于负样本,所以以正样本数为基准,在负样本集中随机提取同样数目负样本作为初始负样本集
5353
"""
5454
positive_list = data_set.get_positives()
5555
negative_list = data_set.get_negatives()
5656

57-
init_negative_idxs = random.sample(range(len(negative_list)), len(positive_list) * 3)
57+
init_negative_idxs = random.sample(range(len(negative_list)), len(positive_list))
5858
init_negative_list = [negative_list[idx] for idx in range(len(negative_list)) if idx in init_negative_idxs]
5959
remain_negative_list = [negative_list[idx] for idx in range(len(negative_list))
6060
if idx not in init_negative_idxs]
@@ -266,9 +266,11 @@ def train_model(data_loaders, model, criterion, optimizer, lr_scheduler, num_epo
266266
model = model.to(device)
267267

268268
criterion = hinge_loss
269-
optimizer = optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
270-
lr_schduler = optim.lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
269+
# 由于初始训练集数量很少,所以降低学习率
270+
optimizer = optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
271+
# 共训练10轮,每隔4论减少一次学习率
272+
lr_schduler = optim.lr_scheduler.StepLR(optimizer, step_size=4, gamma=0.1)
271273

272-
best_model = train_model(data_loaders, model, criterion, optimizer, lr_schduler, num_epochs=25, device=device)
274+
best_model = train_model(data_loaders, model, criterion, optimizer, lr_schduler, num_epochs=10, device=device)
273275
# 保存最好的模型参数
274276
save_model(best_model, 'models/best_linear_svm_alexnet_car.pth')

0 commit comments

Comments (0)
Failed to load comments.