2 changes: 1 addition & 1 deletion ci/run_premerge_multi_gpu.sh
@@ -72,7 +72,7 @@ verify_bundle() {
             bash $extra_script
         fi
         # do multi gpu based unit tests
-        pipenv run python $(pwd)/ci/unit_tests/runner.py --b "$bundle" --dist True
+        pipenv run torchrun $(pwd)/ci/unit_tests/runner.py --b "$bundle" --dist True
         remove_pipenv
     done
 else
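
Note: torchrun (rather than plain python) launches runner.py with the torch.distributed environment variables (RANK, WORLD_SIZE, etc.) set, which the --dist True path needs for multi-GPU tests. An explicit multi-process form would look roughly like:

    pipenv run torchrun --nproc_per_node=<num_gpus> ci/unit_tests/runner.py --b "$bundle" --dist True

where the --nproc_per_node value is an illustrative assumption, not part of this change.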
167 changes: 167 additions & 0 deletions ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion.py
@@ -0,0 +1,167 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import sys
import tempfile
import unittest

import nibabel as nib
import numpy as np
from monai.bundle import ConfigWorkflow
from parameterized import parameterized

TEST_CASE_1 = [
    {
        "bundle_root": "models/brats_mri_axial_slices_generative_diffusion",
        "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))",
        "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))",
        "train#trainer#max_epochs": 1,
        "train#dataset#cache_rate": 0.0,
        "train_batch_size_slice": 4,
    }
]

TEST_CASE_2 = [{"bundle_root": "models/brats_mri_axial_slices_generative_diffusion"}]


def test_order(test_name1, test_name2):
    # specify the test order: the "train_autoencoder.json" config must run
    # first so that it produces the autoencoder weights needed both for
    # inference and for training the diffusion model.
    def get_order(name):
        if "autoencoder" in name:
            return 1 if "train" in name else 2
        if "diffusion" in name:
            return 3 if "train" in name else 4
        return 5

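    # cmp-style comparator: a negative value sorts test_name1 before
    # test_name2, which is the contract unittest's sortTestMethodsUsing expects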
    return get_order(test_name1) - get_order(test_name2)


class TestLdm2d(unittest.TestCase):
    def setUp(self):
        self.dataset_dir = tempfile.mkdtemp()
        dataset_size = 10
        input_shape = (256, 256, 112)
        sub_dir = os.path.join(self.dataset_dir, "Task01_BrainTumour")
        os.makedirs(sub_dir)
        data_list = []
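        # synthesize small random binary volumes so the workflows can run
        # end-to-end without the real Task01_BrainTumour data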
        for s in range(dataset_size):
            test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
            test_label = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
            image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz")
            label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz")
            nib.save(nib.Nifti1Image(test_image, np.eye(4)), image_filename)
            nib.save(nib.Nifti1Image(test_label, np.eye(4)), label_filename)
            sample_dict = {"image": image_filename, "label": label_filename}
            data_list.append(sample_dict)
        # prepare a datalist file that "monai.apps.DecathlonDataset" requires
        full_dict = {
            "name": "",
            "description": "",
            "reference": "",
            "licence": "",
            "tensorImageSize": "",
            "modality": "",
            "labels": "",
            "numTraining": 10,
            "numTest": 0,
            "training": data_list,
        }
        with open(os.path.join(sub_dir, "dataset.json"), "w") as f:
            json.dump(full_dict, f)

    def tearDown(self):
        shutil.rmtree(self.dataset_dir)

    @parameterized.expand([TEST_CASE_1])
    def test_autoencoder_train(self, override):
        override["dataset_dir"] = self.dataset_dir
        bundle_root = override["bundle_root"]
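        # prepend the bundle root so its own modules (e.g. a scripts/ package)
        # can be imported by the configs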
        sys.path = [bundle_root] + sys.path

        trainer = ConfigWorkflow(
            workflow="train",
            config_file=os.path.join(bundle_root, "configs/train_autoencoder.json"),
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            **override,
        )
        trainer.initialize()
        trainer.run()
        trainer.finalize()

    @parameterized.expand([TEST_CASE_2])
    def test_autoencoder_infer(self, override):
        override["dataset_dir"] = self.dataset_dir
        bundle_root = override["bundle_root"]
        sys.path = [bundle_root] + sys.path

        inferrer = ConfigWorkflow(
            workflow="infer",
            config_file=os.path.join(bundle_root, "configs/inference_autoencoder.json"),
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            **override,
        )
        inferrer.initialize()
        inferrer.run()
        inferrer.finalize()

    @parameterized.expand([TEST_CASE_1])
    def test_diffusion_train(self, override):
        override["dataset_dir"] = self.dataset_dir
        bundle_root = override["bundle_root"]
        sys.path = [bundle_root] + sys.path
        autoencoder_file = os.path.join(bundle_root, "configs/train_autoencoder.json")
        diffusion_file = os.path.join(bundle_root, "configs/train_diffusion.json")

        trainer = ConfigWorkflow(
            workflow="train",
            config_file=[autoencoder_file, diffusion_file],
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            **override,
        )
        trainer.initialize()
        # TODO: uncomment the following check after we have monai > 1.2.0
        # https://github.com/Project-MONAI/MONAI/issues/6602
        # check_result = trainer.check_properties()
        # if check_result is not None and len(check_result) > 0:
        #     raise ValueError(f"check properties for the overridden train config failed: {check_result}")
        trainer.run()
        trainer.finalize()

    @parameterized.expand([TEST_CASE_2])
    def test_diffusion_infer(self, override):
        override["dataset_dir"] = self.dataset_dir
        bundle_root = override["bundle_root"]
        sys.path = [bundle_root] + sys.path

        inferrer = ConfigWorkflow(
            workflow="infer",
            config_file=os.path.join(bundle_root, "configs/inference.json"),
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            **override,
        )
        inferrer.initialize()
        inferrer.run()
        inferrer.finalize()


if __name__ == "__main__":
    loader = unittest.TestLoader()
    loader.sortTestMethodsUsing = test_order
    unittest.main(testLoader=loader)
102 changes: 102 additions & 0 deletions ci/unit_tests/test_brats_mri_axial_slices_generative_diffusion_dist.py
@@ -0,0 +1,102 @@
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#     http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import shutil
import tempfile
import unittest

import nibabel as nib
import numpy as np
import torch
from parameterized import parameterized
from utils import export_config_and_run_mgpu_cmd

TEST_CASE_1 = [
    {
        "bundle_root": "models/brats_mri_axial_slices_generative_diffusion",
        "images": "$list(sorted(glob.glob(@dataset_dir + '/image_*.nii.gz')))",
        "labels": "$list(sorted(glob.glob(@dataset_dir + '/label_*.nii.gz')))",
        "train#trainer#max_epochs": 1,
        "train#dataset#cache_rate": 0.0,
        "train_batch_size_slice": 4,
    }
]


class TestLdm2dMGPU(unittest.TestCase):
    def setUp(self):
        self.dataset_dir = tempfile.mkdtemp()
        dataset_size = 10
        input_shape = (256, 256, 112)
        sub_dir = os.path.join(self.dataset_dir, "Task01_BrainTumour")
        os.makedirs(sub_dir)
        data_list = []
        for s in range(dataset_size):
            test_image = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
            test_label = np.random.randint(low=0, high=2, size=input_shape).astype(np.int8)
            image_filename = os.path.join(self.dataset_dir, f"image_{s}.nii.gz")
            label_filename = os.path.join(self.dataset_dir, f"label_{s}.nii.gz")
            nib.save(nib.Nifti1Image(test_image, np.eye(4)), image_filename)
            nib.save(nib.Nifti1Image(test_label, np.eye(4)), label_filename)
            sample_dict = {"image": image_filename, "label": label_filename}
            data_list.append(sample_dict)
        # prepare a datalist file that "monai.apps.DecathlonDataset" requires
        full_dict = {
            "name": "",
            "description": "",
            "reference": "",
            "licence": "",
            "tensorImageSize": "",
            "modality": "",
            "labels": "",
            "numTraining": 10,
            "numTest": 0,
            "training": data_list,
        }
        with open(os.path.join(sub_dir, "dataset.json"), "w") as f:
            json.dump(full_dict, f)

    def tearDown(self):
        shutil.rmtree(self.dataset_dir)

    @parameterized.expand([TEST_CASE_1])
    def test_mgpu(self, override):
        override["dataset_dir"] = self.dataset_dir
        bundle_root = override["bundle_root"]
        autoencoder_file = os.path.join(bundle_root, "configs/train_autoencoder.json")
        diffusion_file = os.path.join(bundle_root, "configs/train_diffusion.json")
        mgpu_autoencoder_file = os.path.join(bundle_root, "configs/multi_gpu_train_autoencoder.json")
        mgpu_diffusion_file = os.path.join(bundle_root, "configs/multi_gpu_train_diffusion.json")
        n_gpu = torch.cuda.device_count()

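        # stage 1: train the autoencoder, composing the base train config with
        # its multi-GPU override and launching one process per visible GPU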
        export_config_and_run_mgpu_cmd(
            config_file=[autoencoder_file, mgpu_autoencoder_file],
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            override_dict=override,
            output_path=os.path.join(bundle_root, "configs/autoencoder_override.json"),
            ngpu=n_gpu,
        )

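        # stage 2: train the diffusion model on top of the autoencoder,
        # composing both train configs with their multi-GPU overrides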
        export_config_and_run_mgpu_cmd(
            config_file=[autoencoder_file, diffusion_file, mgpu_autoencoder_file, mgpu_diffusion_file],
            logging_file=os.path.join(bundle_root, "configs/logging.conf"),
            meta_file=os.path.join(bundle_root, "configs/metadata.json"),
            override_dict=override,
            output_path=os.path.join(bundle_root, "configs/diffusion_override.json"),
            ngpu=n_gpu,
        )


if __name__ == "__main__":
    unittest.main()