Skip to content
This repository has been archived by the owner on Feb 25, 2021. It is now read-only.

Commit

Permalink
Debug camelyon16
Browse files Browse the repository at this point in the history
  • Loading branch information
wilmerwang committed Mar 11, 2019
1 parent 3c7b54d commit 6447256
Show file tree
Hide file tree
Showing 18 changed files with 308 additions and 37 deletions.
6 changes: 4 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,10 @@

## Notes

- [x] **extras/CNNRF 是使用keras进行建模的相近项目**
- [ ] Debug camelyon16
- [x] extras/CNNRF 是使用keras训练的相近项目
- [x] Debug camelyon16
- [ ] extract\_feature\_probsmap.py
- [ ] wsi_classification.py
- [ ] Debug extras/CNNRF

## Requisites
Expand Down
2 changes: 1 addition & 1 deletion camelyon16/bin/Evaluation_FROC.py
Original file line number Diff line number Diff line change
Expand Up @@ -256,4 +256,4 @@ def plotFROC(total_FPs, total_sensitivity):
print('Avg FP = ', str(eval_threshold[i]))
print('Sensitivity = ', str(eval_TPs[i]))

print('Avg Sensivity = ', np.mean(eval_TPs))
print('Avg Sensivity = ', np.mean(eval_TPs))
2 changes: 1 addition & 1 deletion camelyon16/bin/camelyon16xml2json.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../../')

from nnxgb.data.annotation import Formatter # noqa
from camelyon16.data.annotation import Formatter # noqa

parser = argparse.ArgumentParser(description='Convert Camelyon16 xml format to'
'internal json format')
Expand Down
9 changes: 5 additions & 4 deletions camelyon16/bin/extract_feature_probsmap.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@

sys.path.append(os.path.dirname(os.path.abspath(__file__) + '/../../'))

from data.probs_ops import extract_features
from camelyon16.data.probs_ops import extractor_features

parser = argparse.ArgumentParser(description='Extract features from probability map'
'for slide classification')
Expand All @@ -34,6 +34,7 @@ def compute_features(extractor):

probs_map_threshold_p90 = extractor.probs_map_set_p(0.9)
probs_map_threshold_p50 = extractor.probs_map_set_p(0.5)

region_props_p90 = extractor.get_region_props(probs_map_threshold_p90)
region_props_p50 = extractor.get_region_props(probs_map_threshold_p50)

Expand All @@ -44,7 +45,7 @@ def compute_features(extractor):
features.append(f_percentage_tumor_over_tissue_region)

largest_tumor_region_index_t50 = extractor.get_largest_tumor_index(region_props_p50)
f_area_largest_tumor_region_t50 = extractor.region_props_t50[largest_tumor_region_index_t50].area # 3
f_area_largest_tumor_region_t50 = region_props_p50[largest_tumor_region_index_t50].area # 3
features.append(f_area_largest_tumor_region_t50)

f_longest_axis_largest_tumor_region_t50 = extractor.get_longest_axis_in_largest_tumor_region(region_props_p50,
Expand Down Expand Up @@ -76,11 +77,11 @@ def compute_features(extractor):


def run(args):
slide = openslide.OpenSlide(args.wsi_path)
slide_path = args.wsi_path

probs_map = np.load(args.probs_map_path)

extractor = extract_features(probs_map, slide)
extractor = extractor_features(probs_map, slide_path)

features = compute_features(extractor)

Expand Down
3 changes: 2 additions & 1 deletion camelyon16/bin/json2camelyon16xml.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')

from nnxgb.data.annotation import Formatter # noqa
from camelyon16.data.annotation import Formatter # noqa

parser = argparse.ArgumentParser(description='Convert My json format to'
'ASAP json format')
Expand All @@ -15,6 +15,7 @@
parser.add_argument('color', default=None, metavar='COLOR', nargs='+', type=str,
help='The polygon part of color')


def run(args):
with open(args.json_path) as f:
dict = json.load(f)
Expand Down
2 changes: 1 addition & 1 deletion camelyon16/bin/nms.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,4 +63,4 @@ def main():


if __name__ == '__main__':
main()
main()
3 changes: 2 additions & 1 deletion camelyon16/bin/non_tumor_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,5 +31,6 @@ def main():
args = parser.parse_args()
run(args)


if __name__ == "__main__":
main()
main()
2 changes: 1 addition & 1 deletion camelyon16/bin/patch_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
type=str, help='Path to the input list of coordinates')
parser.add_argument('patch_path', default=None, metavar='PATCH_PATH', type=str,
help='Path to the output directory of patch images')
parser.add_argument('--patch_size', default=768, type=int, help='patch size, '
parser.add_argument('--patch_size', default=256, type=int, help='patch size, '
'default 768')
parser.add_argument('--level', default=0, type=int, help='level for WSI, to '
'generate patches, default 0')
Expand Down
13 changes: 12 additions & 1 deletion camelyon16/bin/probs_map.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,7 @@
from torch.utils.data import DataLoader
from torch.autograd import Variable
from torchvision import models
from torch import nn

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')

Expand Down Expand Up @@ -42,6 +43,14 @@
' default 0, which means disabled')


def chose_model(mod):
    """Return an uninitialized torchvision architecture for inference.

    Args:
        mod (str): model identifier from the cnn config; only 'resnet18'
            is currently supported.

    Returns:
        The requested model with random weights (``pretrained=False``);
        the caller replaces the final ``fc`` layer and loads a checkpoint.

    Raises:
        ValueError: if ``mod`` names an unsupported architecture.
            (ValueError subclasses Exception, so existing callers that
            catch Exception keep working.)
    """
    if mod == 'resnet18':
        return models.resnet18(pretrained=False)
    # Name the offending value so config mistakes are easy to diagnose.
    raise ValueError(
        "Unsupported model '%s'; only 'resnet18' is available." % mod)


def get_probs_map(model, dataloader):
probs_map = np.zeros(dataloader.dataset._mask.shape)
num_batch = len(dataloader)
Expand Down Expand Up @@ -96,7 +105,9 @@ def run(args):

mask = np.load(args.mask_path)
ckpt = torch.load(args.ckpt_path)
model = models[cnn['model']]()
model = chose_model(cnn['model'])
fc_features = model.fc.in_features
model.fc = nn.Linear(fc_features, 1)
model.load_state_dict(ckpt['state_dict'])
model = model.cuda().eval()

Expand Down
2 changes: 2 additions & 0 deletions camelyon16/bin/sampled_spot_gen.py
Original file line number Diff line number Diff line change
Expand Up @@ -58,11 +58,13 @@ def run(args):
with open(txt_path, "a") as f:
np.savetxt(f, center_points, fmt="%s", delimiter=",")


def main():
logging.basicConfig(level=logging.INFO)

args = parser.parse_args()
run(args)


if __name__ == "__main__":
main()
2 changes: 1 addition & 1 deletion camelyon16/bin/tissue_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,4 +55,4 @@ def main():


if __name__ == '__main__':
main()
main()
66 changes: 44 additions & 22 deletions camelyon16/bin/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,13 @@
from torch.nn import BCEWithLogitsLoss, DataParallel
from torch.optim import SGD
from torchvision import models
from torchvision.datasets import ImageFolder
from torch import nn

from tensorboardX import SummaryWriter

sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../')

from camelyon16.data.image_producer import ImageDataset
torch.manual_seed(0)
torch.cuda.manual_seed_all(0)

Expand All @@ -33,6 +34,14 @@
' and GPU_1, default 0.')


def chose_model(cnn):
    """Return an uninitialized torchvision architecture for training.

    Args:
        cnn (dict): parsed cnn.json config; ``cnn['model']`` selects the
            architecture. Only 'resnet18' is currently supported.

    Returns:
        The requested model with random weights (``pretrained=False``);
        the caller replaces the final ``fc`` layer before training.

    Raises:
        ValueError: if ``cnn['model']`` names an unsupported architecture.
            (ValueError subclasses Exception, so existing callers that
            catch Exception keep working.)
    """
    if cnn['model'] == 'resnet18':
        return models.resnet18(pretrained=False)
    # Name the offending value so config mistakes are easy to diagnose.
    raise ValueError(
        "Unsupported model '%s'; only 'resnet18' is available."
        % cnn['model'])


def train_epoch(summary, summary_writer, cnn, model, loss_fn, optimizer,
dataloader_train):
model.train()
Expand All @@ -44,10 +53,11 @@ def train_epoch(summary, summary_writer, cnn, model, loss_fn, optimizer,
time_now = time.time()
for step in range(steps):
data_train, target_train = next(dataiter_train)
data_train = Variable(data_train.cuda(async=True))
target_train = Variable(target_train.cuda(async=True))
data_train = Variable(data_train.float().cuda(async=True))
target_train = Variable(target_train.float().cuda(async=True))

output = model(data_train)
output = torch.squeeze(output) # noqa
loss = loss_fn(output, target_train)

optimizer.zero_grad()
Expand All @@ -56,9 +66,10 @@ def train_epoch(summary, summary_writer, cnn, model, loss_fn, optimizer,

probs = output.sigmoid()
predicts = (probs >= 0.5).type(torch.cuda.FloatTensor)

acc_data = (predicts == target_train).type(
torch.cuda.FloatTensor).sum().data[0] * 1.0 / batch_size
loss_data = loss.data[0]
torch.cuda.FloatTensor).sum().data * 1.0 / batch_size
loss_data = loss.data

time_spent = time.time() - time_now
logging.info(
Expand All @@ -74,9 +85,9 @@ def train_epoch(summary, summary_writer, cnn, model, loss_fn, optimizer,
summary_writer.add_scalar('train/loss', loss_data, summary['step'])
summary_writer.add_scalar('train/acc', acc_data, summary['step'])

summary['step'] += 1
summary['epoch'] += 1

return summary
return summary


def valid_epoch(summary, model, loss_fn,
Expand All @@ -91,20 +102,21 @@ def valid_epoch(summary, model, loss_fn,
acc_sum = 0
for step in range(steps):
data_valid, target_valid = next(dataiter_valid)
data_valid = Variable(data_valid.cuda(async=True), volatile=True)
target_valid = Variable(target_valid.cuda(async=True))
data_valid = Variable(data_valid.float().cuda(async=True), volatile=True)
target_valid = Variable(target_valid.float().cuda(async=True))

output = model(data_valid)
output = torch.squeeze(output) # important
loss = loss_fn(output, target_valid)

probs = output.sigmoid()
predicts = (probs >= 0.5).type(torch.cuda.FloatTensor)
acc_data = (predicts == target_valid).tpye(
torch.cuda.FloatTensor).sum().data[0] * 1.0 / batch_size
loss_data = loss.data[0]
acc_data = (predicts == target_valid).type(
torch.cuda.FloatTensor).sum().data * 1.0 / batch_size
loss_data = loss.data

loss_sum += loss_data
acc_sum += loss_data
acc_sum += acc_data

summary['loss'] = loss_sum / steps
summary['acc'] = acc_sum / steps
Expand All @@ -113,7 +125,7 @@ def valid_epoch(summary, model, loss_fn,


def run(args):
with open(args.cnn_path) as f:
with open(args.cnn_path, 'r') as f:
cnn = json.load(f)

if not os.path.exists(args.save_path):
Expand All @@ -128,14 +140,24 @@ def run(args):
batch_size_valid = cnn['batch_size'] * num_GPU
num_workers = args.num_workers * num_GPU

model = models.cnn['model'](pretrained=True)
model = chose_model(cnn)
fc_features = model.fc.in_features
model.fc = nn.Linear(fc_features, 1) # 须知
model = DataParallel(model, device_ids=None)
model = model.cuda()
loss_fn = BCEWithLogitsLoss().cuda()
optimizer = SGD(model.parameters(), lr=cnn['lf'], momentum=cnn['momentum'])

dataset_train = ImageFolder(cnn['data_path_train'])
dataset_valid = ImageFolder(cnn['data_path_valid'])
optimizer = SGD(model.parameters(), lr=cnn['lr'], momentum=cnn['momentum'])

# dataset_train = ImageFolder(cnn['data_path_train'])
# dataset_valid = ImageFolder(cnn['data_path_valid'])
dataset_train = ImageDataset(cnn['data_path_train'],
cnn['image_size'],
cnn['crop_size'],
cnn['normalize'])
dataset_valid = ImageDataset(cnn['data_path_valid'],
cnn['image_size'],
cnn['crop_size'],
cnn['normalize'])

dataloader_train = DataLoader(dataset_train,
batch_size=batch_size_train,
Expand All @@ -145,9 +167,9 @@ def run(args):
num_workers=num_workers)

summary_train = {'epoch': 0, 'step': 0}
summary_valid = {'loss': float('int'), 'acc': 0}
summary_valid = {'loss': float('inf'), 'acc': 0}
summary_writer = SummaryWriter(args.save_path)
loss_valid_best = float('int')
loss_valid_best = float('inf')
for epoch in range(cnn['epoch']):
summary_train = train_epoch(summary_train, summary_writer, cnn, model,
loss_fn, optimizer,
Expand All @@ -170,7 +192,7 @@ def run(args):
summary_valid['acc'], time_spent))

summary_writer.add_scalar('valid/loss',
summary_valid['loss'],summary_train['step'])
summary_valid['loss'], summary_train['step'])
summary_writer.add_scalar('valid/acc',
summary_valid['acc'], summary_train['step'])

Expand Down
2 changes: 1 addition & 1 deletion camelyon16/bin/tumor_mask.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
help='Path to the JSON file')
parser.add_argument('npy_path', default=None, metavar='NPY_PATH', type=str,
help='Path to the output npy mask file')
parser.add_argument('--level', default=2, type=int, help='at which WSI level'
parser.add_argument('--level', default=6, type=int, help='at which WSI level'
' to obtain the mask, default 6')


Expand Down
Empty file removed camelyon16/configs/Densnet.json
Empty file.
13 changes: 13 additions & 0 deletions camelyon16/configs/cnn.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
{
"model": "resnet18",
"batch_size": 10,
"image_size": 256,
"crop_size": 224,
"normalize": "True",
"lr": 0.001,
"momentum": 0.9,
"data_path_train": "/home/qianslab/likewind/github/CAMELYON16/wpatch/",
"data_path_valid": "/home/qianslab/likewind/github/CAMELYON16/wpatch/",
"epoch": 50,
"log_every": 5
}

0 comments on commit 6447256

Please sign in to comment.