Worked on issue #872: Updated files in examples folder
arp95 committed Aug 13, 2020
2 parents 541380b + 57f8dbf commit 672b942
Showing 40 changed files with 1,784 additions and 1,906 deletions.
92 changes: 45 additions & 47 deletions examples/segmentation_3d/unet_evaluation_array.py
@@ -11,7 +11,6 @@
 
 import logging
 import os
-import shutil
 import sys
 import tempfile
 from glob import glob
@@ -33,58 +32,57 @@ def main():
     config.print_config()
     logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 
-    tempdir = tempfile.mkdtemp()
-    print(f"generating synthetic data to {tempdir} (this may take a while)")
-    for i in range(5):
-        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
+    with tempfile.TemporaryDirectory() as tempdir:
+        print(f"generating synthetic data to {tempdir} (this may take a while)")
+        for i in range(5):
+            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
 
-        n = nib.Nifti1Image(im, np.eye(4))
-        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
+            n = nib.Nifti1Image(im, np.eye(4))
+            nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
 
-        n = nib.Nifti1Image(seg, np.eye(4))
-        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
+            n = nib.Nifti1Image(seg, np.eye(4))
+            nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
 
-    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
-    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
+        images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
+        segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
 
-    # define transforms for image and segmentation
-    imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
-    segtrans = Compose([AddChannel(), ToTensor()])
-    val_ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False)
-    # sliding window inference for one image at every iteration
-    val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
-    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")
+        # define transforms for image and segmentation
+        imtrans = Compose([ScaleIntensity(), AddChannel(), ToTensor()])
+        segtrans = Compose([AddChannel(), ToTensor()])
+        val_ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False)
+        # sliding window inference for one image at every iteration
+        val_loader = DataLoader(val_ds, batch_size=1, num_workers=1, pin_memory=torch.cuda.is_available())
+        dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")
 
-    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    model = UNet(
-        dimensions=3,
-        in_channels=1,
-        out_channels=1,
-        channels=(16, 32, 64, 128, 256),
-        strides=(2, 2, 2, 2),
-        num_res_units=2,
-    ).to(device)
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        model = UNet(
+            dimensions=3,
+            in_channels=1,
+            out_channels=1,
+            channels=(16, 32, 64, 128, 256),
+            strides=(2, 2, 2, 2),
+            num_res_units=2,
+        ).to(device)
 
-    model.load_state_dict(torch.load("best_metric_model_array.pth"))
-    model.eval()
-    with torch.no_grad():
-        metric_sum = 0.0
-        metric_count = 0
-        saver = NiftiSaver(output_dir="./output")
-        for val_data in val_loader:
-            val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
-            # define sliding window size and batch size for windows inference
-            roi_size = (96, 96, 96)
-            sw_batch_size = 4
-            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
-            value = dice_metric(y_pred=val_outputs, y=val_labels)
-            metric_count += len(value)
-            metric_sum += value.item() * len(value)
-            val_outputs = (val_outputs.sigmoid() >= 0.5).float()
-            saver.save_batch(val_outputs, val_data[2])
-        metric = metric_sum / metric_count
-        print("evaluation metric:", metric)
-    shutil.rmtree(tempdir)
+        model.load_state_dict(torch.load("best_metric_model_array.pth"))
+        model.eval()
+        with torch.no_grad():
+            metric_sum = 0.0
+            metric_count = 0
+            saver = NiftiSaver(output_dir="./output")
+            for val_data in val_loader:
+                val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
+                # define sliding window size and batch size for windows inference
+                roi_size = (96, 96, 96)
+                sw_batch_size = 4
+                val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
+                value = dice_metric(y_pred=val_outputs, y=val_labels)
+                metric_count += len(value)
+                metric_sum += value.item() * len(value)
+                val_outputs = (val_outputs.sigmoid() >= 0.5).float()
+                saver.save_batch(val_outputs, val_data[2])
+            metric = metric_sum / metric_count
+            print("evaluation metric:", metric)
 
 
 if __name__ == "__main__":
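Both scripts receive the same treatment: the hand-managed tempfile.mkdtemp() / shutil.rmtree(tempdir) pair is replaced with a tempfile.TemporaryDirectory() context manager, which removes the synthetic-data directory even if the evaluation raises partway through. A minimal standalone sketch of that pattern, not taken from the examples (the file name sample.txt is a placeholder):

import os
import tempfile

# TemporaryDirectory creates the directory on entry and deletes it on exit,
# even when the body raises, so no explicit shutil.rmtree() call is needed.
with tempfile.TemporaryDirectory() as tempdir:
    sample_path = os.path.join(tempdir, "sample.txt")  # placeholder file name
    with open(sample_path, "w") as f:
        f.write("synthetic data would be written here")
    print(sorted(os.listdir(tempdir)))  # ['sample.txt']

# After the block exits, the directory and everything in it are gone.
print(os.path.exists(tempdir))  # False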
130 changes: 64 additions & 66 deletions examples/segmentation_3d/unet_evaluation_dict.py
@@ -11,7 +11,6 @@
 
 import logging
 import os
-import shutil
 import sys
 import tempfile
 from glob import glob
@@ -34,71 +33,70 @@ def main():
     monai.config.print_config()
     logging.basicConfig(stream=sys.stdout, level=logging.INFO)
 
-    tempdir = tempfile.mkdtemp()
-    print(f"generating synthetic data to {tempdir} (this may take a while)")
-    for i in range(5):
-        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
-
-        n = nib.Nifti1Image(im, np.eye(4))
-        nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
-
-        n = nib.Nifti1Image(seg, np.eye(4))
-        nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
-
-    images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
-    segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
-    val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]
-
-    # define transforms for image and segmentation
-    val_transforms = Compose(
-        [
-            LoadNiftid(keys=["img", "seg"]),
-            AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
-            ScaleIntensityd(keys="img"),
-            ToTensord(keys=["img", "seg"]),
-        ]
-    )
-    val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
-    # sliding window inference need to input 1 image in every iteration
-    val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
-    dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")
-
-    # try to use all the available GPUs
-    devices = get_devices_spec(None)
-    model = UNet(
-        dimensions=3,
-        in_channels=1,
-        out_channels=1,
-        channels=(16, 32, 64, 128, 256),
-        strides=(2, 2, 2, 2),
-        num_res_units=2,
-    ).to(devices[0])
-
-    model.load_state_dict(torch.load("best_metric_model_dict.pth"))
-
-    # if we have multiple GPUs, set data parallel to execute sliding window inference
-    if len(devices) > 1:
-        model = torch.nn.DataParallel(model, device_ids=devices)
-
-    model.eval()
-    with torch.no_grad():
-        metric_sum = 0.0
-        metric_count = 0
-        saver = NiftiSaver(output_dir="./output")
-        for val_data in val_loader:
-            val_images, val_labels = val_data["img"].to(devices[0]), val_data["seg"].to(devices[0])
-            # define sliding window size and batch size for windows inference
-            roi_size = (96, 96, 96)
-            sw_batch_size = 4
-            val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
-            value = dice_metric(y_pred=val_outputs, y=val_labels)
-            metric_count += len(value)
-            metric_sum += value.item() * len(value)
-            val_outputs = (val_outputs.sigmoid() >= 0.5).float()
-            saver.save_batch(val_outputs, val_data["img_meta_dict"])
-        metric = metric_sum / metric_count
-        print("evaluation metric:", metric)
-    shutil.rmtree(tempdir)
+    with tempfile.TemporaryDirectory() as tempdir:
+        print(f"generating synthetic data to {tempdir} (this may take a while)")
+        for i in range(5):
+            im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1, channel_dim=-1)
+
+            n = nib.Nifti1Image(im, np.eye(4))
+            nib.save(n, os.path.join(tempdir, f"im{i:d}.nii.gz"))
+
+            n = nib.Nifti1Image(seg, np.eye(4))
+            nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))
+
+        images = sorted(glob(os.path.join(tempdir, "im*.nii.gz")))
+        segs = sorted(glob(os.path.join(tempdir, "seg*.nii.gz")))
+        val_files = [{"img": img, "seg": seg} for img, seg in zip(images, segs)]
+
+        # define transforms for image and segmentation
+        val_transforms = Compose(
+            [
+                LoadNiftid(keys=["img", "seg"]),
+                AsChannelFirstd(keys=["img", "seg"], channel_dim=-1),
+                ScaleIntensityd(keys="img"),
+                ToTensord(keys=["img", "seg"]),
+            ]
+        )
+        val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
+        # sliding window inference need to input 1 image in every iteration
+        val_loader = DataLoader(val_ds, batch_size=1, num_workers=4, collate_fn=list_data_collate)
+        dice_metric = DiceMetric(include_background=True, to_onehot_y=False, sigmoid=True, reduction="mean")
+
+        # try to use all the available GPUs
+        devices = get_devices_spec(None)
+        model = UNet(
+            dimensions=3,
+            in_channels=1,
+            out_channels=1,
+            channels=(16, 32, 64, 128, 256),
+            strides=(2, 2, 2, 2),
+            num_res_units=2,
+        ).to(devices[0])
+
+        model.load_state_dict(torch.load("best_metric_model_dict.pth"))
+
+        # if we have multiple GPUs, set data parallel to execute sliding window inference
+        if len(devices) > 1:
+            model = torch.nn.DataParallel(model, device_ids=devices)
+
+        model.eval()
+        with torch.no_grad():
+            metric_sum = 0.0
+            metric_count = 0
+            saver = NiftiSaver(output_dir="./output")
+            for val_data in val_loader:
+                val_images, val_labels = val_data["img"].to(devices[0]), val_data["seg"].to(devices[0])
+                # define sliding window size and batch size for windows inference
+                roi_size = (96, 96, 96)
+                sw_batch_size = 4
+                val_outputs = sliding_window_inference(val_images, roi_size, sw_batch_size, model)
+                value = dice_metric(y_pred=val_outputs, y=val_labels)
+                metric_count += len(value)
+                metric_sum += value.item() * len(value)
+                val_outputs = (val_outputs.sigmoid() >= 0.5).float()
+                saver.save_batch(val_outputs, val_data["img_meta_dict"])
+            metric = metric_sum / metric_count
+            print("evaluation metric:", metric)
 
 
 if __name__ == "__main__":
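The dict-based script additionally tries to use every visible GPU: get_devices_spec(None) lists the available devices, the model is loaded onto the first one, and torch.nn.DataParallel spreads each sliding-window batch across the others when more than one device is present. A rough sketch of that branching logic using plain torch calls only (the tiny Linear model and random input are stand-ins for the UNet and NIfTI volumes in the example):

import torch

# Enumerate visible CUDA devices; fall back to CPU when none are available.
if torch.cuda.is_available():
    devices = [torch.device(f"cuda:{i}") for i in range(torch.cuda.device_count())]
else:
    devices = [torch.device("cpu")]

model = torch.nn.Linear(8, 1).to(devices[0])  # stand-in model for illustration

# With more than one GPU, DataParallel replicates the model and splits the batch.
if len(devices) > 1:
    model = torch.nn.DataParallel(model, device_ids=devices)

model.eval()
with torch.no_grad():
    x = torch.randn(4, 8, device=devices[0])  # stand-in input batch
    print(model(x).shape)  # torch.Size([4, 1])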