-
Notifications
You must be signed in to change notification settings - Fork 7
/
test_stage1.py
104 lines (91 loc) · 3.82 KB
/
test_stage1.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
# coding=utf-8
import pdb
import torch
import torch.nn.functional as F
import torchvision
from tqdm import tqdm
from PIL import Image
import numpy as np
from datasets import VOC, Saliency
from datasets import palette as palette_voc
from evaluate_seg import evaluate_iou
from evaluate_sal import fm_and_mae
import json
import os
from jls_fcn import JLSFCN
from logger import Logger
# ---- Configuration -------------------------------------------------------
image_size = 256          # side length every image is resized to
batch_size = 8
c_output = 21             # 20 VOC object classes + background
experiment_name = "debug5"
path_save_checkpoints = "./stage1.pth"

# Output directories for validation predictions.  os.makedirs(exist_ok=True)
# also creates the missing parents ("output", "output/validation"), which
# plain os.mkdir would raise FileNotFoundError on.
path_save_valid_voc = "output/validation/{}_voc".format(experiment_name)
os.makedirs(path_save_valid_voc, exist_ok=True)
path_save_valid_sal = "output/validation/{}_sal".format(experiment_name)
os.makedirs(path_save_valid_sal, exist_ok=True)

# Model restored from the stage-1 checkpoint; evaluation only in this script.
net = JLSFCN(c_output).cuda()
net.load_state_dict(torch.load(path_save_checkpoints))

# ImageNet normalization constants, shaped (1, 3, 1, 1) so they broadcast
# over a (N, 3, H, W) image batch.
mean = torch.Tensor([0.485, 0.456, 0.406])[None, ..., None, None].cuda()
std = torch.Tensor([0.229, 0.224, 0.225])[None, ..., None, None].cuda()

# Dataset locations (machine-specific absolute paths).
voc_val_img_dir = '/home/zeng/data/datasets/segmentation/VOCdevkit/VOC2012/JPEGImages'
voc_val_gt_dir = '/home/zeng/data/datasets/segmentation/VOCdevkit/VOC2012/SegmentationClass'
voc_val_split = '/home/zeng/data/datasets/segmentation/VOCdevkit/VOC2012/ImageSets/Segmentation/val.txt'
sal_val_img_dir = '/home/zeng/data/datasets/saliency/ECSSD/images'
sal_val_gt_dir = '/home/zeng/data/datasets/saliency/ECSSD/masks'

# Validation loaders: no augmentation; shuffling is unnecessary because the
# metrics are computed over saved per-name files and are order-independent.
sal_val_loader = torch.utils.data.DataLoader(
    Saliency(sal_val_img_dir, sal_val_gt_dir,
             crop=None, flip=False, rotate=None, size=image_size, training=False),
    batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
voc_val_loader = torch.utils.data.DataLoader(
    VOC(voc_val_img_dir, voc_val_gt_dir, voc_val_split,
        crop=None, flip=False, rotate=None, size=image_size, training=False),
    batch_size=batch_size, shuffle=False, num_workers=4, pin_memory=True)
def val_sal():
    """Run saliency validation on the ECSSD set.

    Writes one 8-bit grayscale saliency map per image (resized back to the
    image's original resolution) into ``path_save_valid_sal``, then scores
    the directory against ``sal_val_gt_dir``.

    Returns:
        tuple: ``(fm, mae)`` — F-measure and mean absolute error from
        ``fm_and_mae``.
    """
    net.eval()
    with torch.no_grad():
        for it, (img, gt, batch_name, WW, HH) in tqdm(
                enumerate(sal_val_loader), desc='val_sal'):
            # Normalize with the ImageNet statistics prepared at module level.
            img = (img.cuda() - mean) / std
            pred_seg, v_sal, _ = net(img)
            pred_seg = torch.softmax(pred_seg, 1)
            # Foreground saliency = class probabilities weighted by the
            # per-class saliency vector, summed over all non-background
            # channels; scaled to the 8-bit range for PNG output.
            fg = (pred_seg[:, 1:] * v_sal[:, 1:]).sum(1, keepdim=True)
            fg = fg.squeeze(1) * 255
            for n, name in enumerate(batch_name):
                msk = fg[n].detach().cpu().numpy()
                msk = Image.fromarray(msk.astype(np.uint8))
                # WW/HH come out of the DataLoader as tensors — cast to int
                # before handing them to PIL.
                msk = msk.resize((int(WW[n]), int(HH[n])))
                msk.save('{}/{}.png'.format(path_save_valid_sal, name), 'PNG')
    fm, mae, _, _ = fm_and_mae(path_save_valid_sal, sal_val_gt_dir)
    net.train()
    return fm, mae
def val_voc():
    """Run semantic-segmentation validation on the VOC val split.

    Writes one palette PNG label map per image (resized back to the image's
    original resolution) into ``path_save_valid_voc``, then computes mean
    IoU against ``voc_val_gt_dir``.

    Returns:
        float: mean intersection-over-union over ``c_output`` classes from
        ``evaluate_iou``.
    """
    net.eval()
    with torch.no_grad():
        for it, (img, gt, batch_name, WW, HH) in tqdm(
                enumerate(voc_val_loader), desc='val_voc'):
            img = (img.cuda() - mean) / std
            outputs = net(img)
            # outputs[0] is the segmentation logit map; argmax over the
            # channel dimension yields per-pixel class indices.
            _, batch_seg = outputs[0].detach().max(1)
            for n, name in enumerate(batch_name):
                msk = batch_seg[n].cpu().numpy()
                msk = Image.fromarray(msk.astype(np.uint8)).convert('P')
                msk.putpalette(palette_voc)
                # NEAREST keeps label indices intact; an interpolating
                # resample would blend palette indices into invalid classes.
                msk = msk.resize((int(WW[n]), int(HH[n])), Image.NEAREST)
                msk.save('{}/{}.png'.format(path_save_valid_voc, name), 'PNG')
    miou = evaluate_iou(path_save_valid_voc, voc_val_gt_dir, c_output)
    net.train()
    return miou
if __name__ == "__main__":
fm, mae = val_sal()
print(fm, mae)
#net.load_state_dict(torch.load("output/checkpoints/debug/500.pth"))
#miou = val()
#print(miou)