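"""Evaluation script: runs volume-wise inference on the Synapse test set with a
SAM-based model (SAM_LST by default) and logs per-class Dice and HD95 metrics."""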
import argparse
import logging
import os
import random
import sys
from importlib import import_module

import numpy as np
import torch
import torch.backends.cudnn as cudnn
from icecream import ic
from torch.utils.data import DataLoader
from tqdm import tqdm

from datasets.dataset_synapse import Synapse_dataset
from segment_anything import sam_model_registry
from utils import test_single_volume
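# Synapse label index -> organ name (label 0 is the background class).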
class_to_name = {
    1: 'spleen',
    2: 'right kidney',
    3: 'left kidney',
    4: 'gallbladder',
    5: 'liver',
    6: 'stomach',
    7: 'aorta',
    8: 'pancreas',
}
def inference(args, multimask_output, db_config, model, test_save_path=None):
    """Run volume-wise inference on the test split and log per-class Dice/HD95."""
    db_test = db_config['Dataset'](base_dir=args.volume_path, list_dir=args.list_dir, split='test_vol')
    testloader = DataLoader(db_test, batch_size=1, shuffle=False, num_workers=1)
    logging.info(f'{len(testloader)} test iterations per epoch')
    model.eval()
    metric_list = 0.0
    for i_batch, sampled_batch in enumerate(tqdm(testloader)):
        h, w = sampled_batch['image'].shape[2:]
        image, label, case_name = sampled_batch['image'], sampled_batch['label'], sampled_batch['case_name'][0]
        metric_i = test_single_volume(image, label, model, classes=args.num_classes,
                                      multimask_output=multimask_output,
                                      patch_size=[args.img_size, args.img_size],
                                      input_size=[args.input_size, args.input_size],
                                      test_save_path=test_save_path, case=case_name,
                                      z_spacing=db_config['z_spacing'])
        metric_list += np.array(metric_i)
        logging.info('idx %d case %s mean_dice %f mean_hd95 %f' % (
            i_batch, case_name, np.mean(metric_i, axis=0)[0], np.mean(metric_i, axis=0)[1]))
    metric_list = metric_list / len(db_test)  # average metrics over all test volumes
    for i in range(1, args.num_classes + 1):
        try:
            logging.info('Mean class %d name %s mean_dice %f mean_hd95 %f'
                         % (i, class_to_name[i], metric_list[i - 1][0], metric_list[i - 1][1]))
        except KeyError:  # class index has no entry in class_to_name
            logging.info('Mean class %d mean_dice %f mean_hd95 %f'
                         % (i, metric_list[i - 1][0], metric_list[i - 1][1]))
    performance = np.mean(metric_list, axis=0)[0]
    mean_hd95 = np.mean(metric_list, axis=0)[1]
    logging.info('Testing performance : mean_dice : %f mean_hd95 : %f' % (performance, mean_hd95))
    return 1
def config_to_dict(config):
    """Parse a 'key: value' per-line config file into a dict of strings."""
    items_dict = {}
    with open(config, 'r') as f:
        for line in f:
            key, value = line.strip().split(': ', 1)  # maxsplit guards values containing ': '
            items_dict[key] = value
    return items_dict
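# Expected config file format, one 'key: value' pair per line (values illustrative):
#   img_size: 512
#   num_classes: 8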
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default=None, help='The config file provided by the trained model')
    parser.add_argument('--volume_path', type=str, default='/mnt/data3/chai/SAM/SAMed-main/test_vol_h5/')
    parser.add_argument('--dataset', type=str, default='Synapse', help='Experiment name')
    parser.add_argument('--num_classes', type=int, default=8)
    parser.add_argument('--list_dir', type=str, default='./lists/lists_Synapse/', help='list_dir')
    parser.add_argument('--output_dir', type=str, default='/mnt/data3/chai/SAM/SAMed-main/results')
    parser.add_argument('--img_size', type=int, default=512, help='Input image size of the network')
    parser.add_argument('--input_size', type=int, default=224, help='The input size for training SAM model')
    parser.add_argument('--seed', type=int, default=1234, help='random seed')
    parser.add_argument('--is_savenii', action='store_true', help='Whether to save results during inference')
    parser.add_argument('--deterministic', type=int, default=1, help='whether use deterministic training')
    parser.add_argument('--ckpt', type=str, default='/mnt/data3/chai/SAM/sam_vit_b_01ec64.pth',
                        help='Pretrained checkpoint')
    parser.add_argument('--my_ckpt', type=str,
                        default='/mnt/data3/chai/SAM/SAMed-main/results/Synapse_LST_512_pretrain_vit_b_epo200_bs12_lr0.001/epoch_194.pth',
                        help='The checkpoint from LoRA')
    parser.add_argument('--vit_name', type=str, default='vit_b', help='Select one vit model')
    parser.add_argument('--module', type=str, default='my_sam_LST')
    args = parser.parse_args()

    if args.config is not None:
        # Overwrite default configurations with values from the config file
        # (note: values parsed from the file are applied as strings).
        config_dict = config_to_dict(args.config)
        for key in config_dict:
            setattr(args, key, config_dict[key])
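    # Reproducibility: configure cuDNN and seed all RNGs with the same value.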
    if not args.deterministic:
        cudnn.benchmark = True
        cudnn.deterministic = False
    else:
        cudnn.benchmark = False
        cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
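    # Per-dataset settings; only the Synapse multi-organ dataset is configured here.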
    dataset_name = args.dataset
    dataset_config = {
        'Synapse': {
            'Dataset': Synapse_dataset,
            'volume_path': args.volume_path,
            'list_dir': args.list_dir,
            'num_classes': args.num_classes,
            'z_spacing': 1,
        }
    }
    os.makedirs(args.output_dir, exist_ok=True)
    # Register the SAM backbone; pixel_mean/std of 0/1 disable SAM's built-in
    # input normalization (the data pipeline is assumed to handle it).
    sam, img_embedding_size = sam_model_registry[args.vit_name](image_size=args.img_size,
                                                                num_classes=args.num_classes,
                                                                checkpoint=args.ckpt,
                                                                pixel_mean=[0, 0, 0],
                                                                pixel_std=[1, 1, 1])
    # Alternative model wrappers, kept commented out for reference:
    # pkg = import_module(args.module)
    # net = pkg.LoRA_Sam(sam, args.rank).cuda()
    pkg = import_module(args.module)
    # net = torch.nn.DataParallel(pkg.SAM_finetune()).cuda()
    net = torch.nn.DataParallel(pkg.SAM_LST()).cuda()
    # assert args.lora_ckpt is not None
    # NOTE: args.my_ckpt is parsed above but never loaded here; SAM_LST() is
    # assumed to restore its own weights.
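    # SAM's mask decoder can emit multiple masks; enable that for multi-class output.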
    multimask_output = args.num_classes > 1
    # Initialize logging to both a file under output_dir and stdout.
    log_folder = os.path.join(args.output_dir, 'test_log')
    os.makedirs(log_folder, exist_ok=True)
    logging.basicConfig(filename=os.path.join(log_folder, 'log.txt'), level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))
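    # Optionally save per-case predictions under output_dir/predictions
    # (volume files, per the --is_savenii flag name).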
    if args.is_savenii:
        test_save_path = os.path.join(args.output_dir, 'predictions')
        os.makedirs(test_save_path, exist_ok=True)
    else:
        test_save_path = None
    inference(args, multimask_output, dataset_config[dataset_name], net, test_save_path)
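
# Example invocation (paths are illustrative, not part of the repo):
#   python test.py --vit_name vit_b --ckpt ./checkpoints/sam_vit_b_01ec64.pth \
#       --volume_path ./test_vol_h5 --output_dir ./results --is_savenii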