2 changes: 1 addition & 1 deletion monai/apps/auto3dseg/bundle_gen.py
@@ -32,7 +32,7 @@
from monai.utils import ensure_tuple

logger = get_logger(module_name=__name__)
ALGO_HASH = os.environ.get("MONAI_ALGO_HASH", "d7bf36c")
ALGO_HASH = os.environ.get("MONAI_ALGO_HASH", "5895e1b")

__all__ = ["BundleAlgo", "BundleGen"]

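Note: ALGO_HASH is resolved once, at module import time, so the pinned template revision can be overridden from the environment before monai.apps.auto3dseg is imported. A minimal sketch of that override (the hash is simply the new default from this diff, not a recommendation):

import os

os.environ["MONAI_ALGO_HASH"] = "5895e1b"  # must be set before bundle_gen is first imported
from monai.apps.auto3dseg import BundleGen  # bundle_gen reads ALGO_HASH during import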
32 changes: 20 additions & 12 deletions tests/test_auto3dseg_ensemble.py
@@ -16,6 +16,7 @@

import nibabel as nib
import numpy as np
+import torch

from monai.apps.auto3dseg import AlgoEnsembleBestByFold, AlgoEnsembleBestN, AlgoEnsembleBuilder, BundleGen, DataAnalyzer
from monai.bundle.config_parser import ConfigParser
@@ -44,14 +45,21 @@
],
}

-train_param = {
-    "CUDA_VISIBLE_DEVICES": [0],
-    "num_iterations": 8,
-    "num_iterations_per_validation": 4,
-    "num_images_per_batch": 2,
-    "num_epochs": 2,
-    "num_warmup_iterations": 4,
-}
+num_gpus = 4 if torch.cuda.device_count() > 4 else torch.cuda.device_count()
+train_param = (
+    {
+        "CUDA_VISIBLE_DEVICES": list(range(num_gpus)),
+        "num_iterations": int(4 / num_gpus),
+        "num_iterations_per_validation": int(4 / num_gpus),
+        "num_images_per_batch": 2,
+        "num_epochs": 1,
+        "num_warmup_iterations": int(4 / num_gpus),
+        "use_pretrain": False,
+        "pretrained_path": "",
+    }
+    if torch.cuda.is_available()
+    else {}
+)
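The block above replaces the fixed single-GPU setup with one that caps the visible devices at four and scales the iteration counts down as more GPUs share the work; on a CPU-only runner the empty dict leaves the algorithm defaults untouched. A worked example, assuming a hypothetical 2-GPU host:

# torch.cuda.device_count() == 2 on the assumed host
num_gpus = 2                          # 2 > 4 is False, so the device count is used as-is
cuda_devices = list(range(num_gpus))  # -> [0, 1]
iters = int(4 / num_gpus)             # -> 2 for num_iterations and the other scaled settings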

pred_param = {"files_slices": slice(0, 1), "mode": "mean", "sigmoid": True}
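As I read pred_param, files_slices limits inference to the first test file, mode="mean" averages the ensemble members' predictions, and sigmoid converts logits to probabilities. The slice itself is plain Python (file names below are hypothetical):

test_files = ["sim0.nii.gz", "sim1.nii.gz"]  # hypothetical datalist entries
subset = test_files[slice(0, 1)]             # -> ["sim0.nii.gz"]; one file keeps the test fast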

@@ -81,7 +89,7 @@ def test_ensemble(self) -> None:

# Generate a fake dataset
for d in fake_datalist["testing"] + fake_datalist["training"]:
-            im, seg = create_test_image_3d(64, 64, 64, rad_max=10, num_seg_classes=1)
+            im, seg = create_test_image_3d(24, 24, 24, rad_max=10, num_seg_classes=1)
nib_image = nib.Nifti1Image(im, affine=np.eye(4))
image_fpath = os.path.join(dataroot, d["image"])
nib.save(nib_image, image_fpath)
@@ -114,7 +122,7 @@ def test_ensemble(self) -> None:
bundle_generator = BundleGen(
algo_path=work_dir, data_stats_filename=da_output_yaml, data_src_cfg_name=data_src_cfg
)
-        bundle_generator.generate(work_dir, num_fold=2)
+        bundle_generator.generate(work_dir, num_fold=1)
history = bundle_generator.get_history()

for h in history:
@@ -126,9 +134,9 @@ def test_ensemble(self) -> None:
builder.set_ensemble_method(AlgoEnsembleBestN(n_best=2))
ensemble = builder.get_ensemble()
preds = ensemble(pred_param)
-        self.assertTupleEqual(preds[0].shape, (2, 64, 64, 64))
+        self.assertTupleEqual(preds[0].shape, (2, 24, 24, 24))

-        builder.set_ensemble_method(AlgoEnsembleBestByFold(2))
+        builder.set_ensemble_method(AlgoEnsembleBestByFold(1))
ensemble = builder.get_ensemble()
for algo in ensemble.get_algo_ensemble():
print(algo[AlgoEnsembleKeys.ID])
75 changes: 20 additions & 55 deletions tests/test_auto3dseg_hpo.py
@@ -10,14 +10,14 @@
# limitations under the License.

import os
-import shutil
import tempfile
import unittest
from functools import partial
from typing import Dict, List

import nibabel as nib
import numpy as np
+import torch

from monai.apps.auto3dseg import BundleGen, DataAnalyzer, NNIGen, OptunaGen, import_bundle_algo_history
from monai.bundle.config_parser import ConfigParser
@@ -28,6 +28,23 @@
_, has_tb = optional_import("torch.utils.tensorboard", name="SummaryWriter")
optuna, has_optuna = optional_import("optuna")

+num_gpus = 4 if torch.cuda.device_count() > 4 else torch.cuda.device_count()

+override_param = (
+    {
+        "CUDA_VISIBLE_DEVICES": list(range(num_gpus)),
+        "num_iterations": int(4 / num_gpus),
+        "num_iterations_per_validation": int(4 / num_gpus),
+        "num_images_per_batch": 2,
+        "num_epochs": 1,
+        "num_warmup_iterations": int(4 / num_gpus),
+        "use_pretrain": False,
+        "pretrained_path": "",
+    }
+    if torch.cuda.is_available()
+    else {}
+)


def skip_if_no_optuna(obj):
"""
@@ -76,7 +93,7 @@ def setUp(self) -> None:

# Generate a fake dataset
for d in fake_datalist["testing"] + fake_datalist["training"]:
-            im, seg = create_test_image_3d(64, 64, 64, rad_max=10, num_seg_classes=1)
+            im, seg = create_test_image_3d(24, 24, 24, rad_max=10, num_seg_classes=1)
nib_image = nib.Nifti1Image(im, affine=np.eye(4))
image_fpath = os.path.join(dataroot, d["image"])
nib.save(nib_image, image_fpath)
@@ -108,21 +125,14 @@ def setUp(self) -> None:
bundle_generator = BundleGen(
algo_path=work_dir, data_stats_filename=da_output_yaml, data_src_cfg_name=data_src_cfg
)
-        bundle_generator.generate(work_dir, num_fold=2)
+        bundle_generator.generate(work_dir, num_fold=1)

self.history = bundle_generator.get_history()
self.work_dir = work_dir
self.test_path = test_path

@skip_if_no_cuda
def test_run_algo(self) -> None:
-        override_param = {
-            "num_iterations": 8,
-            "num_iterations_per_validation": 4,
-            "num_images_per_batch": 2,
-            "num_epochs": 2,
-            "num_warmup_iterations": 4,
-        }

algo_dict = self.history[0]
algo_name = list(algo_dict.keys())[0]
@@ -135,14 +145,6 @@ def test_run_algo(self) -> None:
@skip_if_no_cuda
@skip_if_no_optuna
def test_run_optuna(self) -> None:
-        override_param = {
-            "num_iterations": 8,
-            "num_iterations_per_validation": 4,
-            "num_images_per_batch": 2,
-            "num_epochs": 2,
-            "num_warmup_iterations": 4,
-        }

algo_dict = self.history[0]
algo_name = list(algo_dict.keys())[0]
algo = algo_dict[algo_name]
@@ -164,45 +166,8 @@ def get_hyperparameters(self):
)
print(f"Best value: {study.best_value} (params: {study.best_params})\n")

-    @skip_if_no_cuda
-    def test_run_algo_after_move_files(self) -> None:
-        override_param = {
-            "num_iterations": 8,
-            "num_iterations_per_validation": 4,
-            "num_images_per_batch": 2,
-            "num_epochs": 2,
-            "num_warmup_iterations": 4,
-        }

-        algo_dict = self.history[0]
-        algo_name = list(algo_dict.keys())[0]
-        algo = algo_dict[algo_name]
-        nni_gen = NNIGen(algo=algo, params=override_param)
-        obj_filename = nni_gen.get_obj_filename()

-        work_dir_2 = os.path.join(self.test_path, "workdir2")
-        os.makedirs(work_dir_2)
-        algorithm_template = os.path.join(self.work_dir, "algorithm_templates")
-        algorithm_templates_2 = os.path.join(work_dir_2, "algorithm_templates")
-        algo_dir = os.path.dirname(obj_filename)
-        algo_dir_2 = os.path.join(work_dir_2, os.path.basename(algo_dir))

-        obj_filename_2 = os.path.join(algo_dir_2, "algo_object.pkl")
-        shutil.copytree(algorithm_template, algorithm_templates_2)
-        shutil.copytree(algo_dir, algo_dir_2)
-        # this function will be used in HPO via Python Fire in remote
-        NNIGen().run_algo(obj_filename_2, work_dir_2, template_path=algorithm_templates_2)

@skip_if_no_cuda
def test_get_history(self) -> None:
-        override_param = {
-            "num_iterations": 8,
-            "num_iterations_per_validation": 4,
-            "num_images_per_batch": 2,
-            "num_epochs": 2,
-            "num_warmup_iterations": 4,
-        }

algo_dict = self.history[0]
algo_name = list(algo_dict.keys())[0]
algo = algo_dict[algo_name]
59 changes: 35 additions & 24 deletions tests/test_integration_autorunner.py
@@ -16,6 +16,7 @@

import nibabel as nib
import numpy as np
+import torch

from monai.apps.auto3dseg import AutoRunner
from monai.bundle.config_parser import ConfigParser
@@ -44,14 +45,21 @@
],
}

-train_param = {
-    "CUDA_VISIBLE_DEVICES": [0],
-    "num_iterations": 8,
-    "num_iterations_per_validation": 4,
-    "num_images_per_batch": 2,
-    "num_epochs": 2,
-    "num_warmup_iterations": 4,
-}
+num_gpus = 4 if torch.cuda.device_count() > 4 else torch.cuda.device_count()
+train_param = (
+    {
+        "CUDA_VISIBLE_DEVICES": list(range(num_gpus)),
+        "num_iterations": int(4 / num_gpus),
+        "num_iterations_per_validation": int(4 / num_gpus),
+        "num_images_per_batch": 2,
+        "num_epochs": 1,
+        "num_warmup_iterations": int(4 / num_gpus),
+        "use_pretrain": False,
+        "pretrained_path": "",
+    }
+    if torch.cuda.is_available()
+    else {}
+)

pred_param = {"files_slices": slice(0, 1), "mode": "mean", "sigmoid": True}

@@ -70,7 +78,7 @@ def setUp(self) -> None:

# Generate a fake dataset
for d in sim_datalist["testing"] + sim_datalist["training"]:
-            im, seg = create_test_image_3d(64, 64, 64, rad_max=10, num_seg_classes=1)
+            im, seg = create_test_image_3d(24, 24, 24, rad_max=10, num_seg_classes=1)
nib_image = nib.Nifti1Image(im, affine=np.eye(4))
image_fpath = os.path.join(sim_dataroot, d["image"])
nib.save(nib_image, image_fpath)
@@ -123,22 +131,25 @@ def test_autorunner_hpo(self) -> None:
work_dir = os.path.join(self.test_path, "work_dir")
runner = AutoRunner(work_dir=work_dir, input=self.data_src_cfg, hpo=True, ensemble=False)
hpo_param = {
"num_iterations": 8,
"num_iterations_per_validation": 4,
"num_images_per_batch": 2,
"num_epochs": 2,
"num_warmup_iterations": 4,
"CUDA_VISIBLE_DEVICES": train_param["CUDA_VISIBLE_DEVICES"],
"num_iterations": train_param["num_iterations"],
"num_iterations_per_validation": train_param["num_iterations_per_validation"],
"num_images_per_batch": train_param["num_images_per_batch"],
"num_epochs": train_param["num_epochs"],
"num_warmup_iterations": train_param["num_warmup_iterations"],
"use_pretrain": train_param["use_pretrain"],
"pretrained_path": train_param["pretrained_path"],
# below are to shorten the time for dints
"training#num_iterations": 8,
"training#num_iterations_per_validation": 4,
"training#num_images_per_batch": 2,
"training#num_epochs": 2,
"training#num_warmup_iterations": 4,
"searching#num_iterations": 8,
"searching#num_iterations_per_validation": 4,
"searching#num_images_per_batch": 2,
"searching#num_epochs": 2,
"searching#num_warmup_iterations": 4,
"training#num_iterations": train_param["num_iterations"],
"training#num_iterations_per_validation": train_param["num_iterations_per_validation"],
"training#num_images_per_batch": train_param["num_images_per_batch"],
"training#num_epochs": train_param["num_epochs"],
"training#num_warmup_iterations": train_param["num_warmup_iterations"],
"searching#num_iterations": train_param["num_iterations"],
"searching#num_iterations_per_validation": train_param["num_iterations_per_validation"],
"searching#num_images_per_batch": train_param["num_images_per_batch"],
"searching#num_epochs": train_param["num_epochs"],
"searching#num_warmup_iterations": train_param["num_warmup_iterations"],
"nni_dry_run": True,
}
search_space = {"learning_rate": {"_type": "choice", "_value": [0.0001, 0.001, 0.01, 0.1]}}
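The training#... and searching#... keys use MONAI's bundle-config path syntax, where # separates nested dictionary levels. A minimal sketch of how such a key resolves (toy config, not the actual dints bundle):

from monai.bundle.config_parser import ConfigParser

cfg = ConfigParser({"training": {"num_epochs": 2}})
cfg["training#num_epochs"] = 1     # addresses config["training"]["num_epochs"]
print(cfg["training#num_epochs"])  # -> 1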
3 changes: 2 additions & 1 deletion tests/test_weighted_random_sampler_dist.py
@@ -16,10 +16,11 @@
import torch.distributed as dist

from monai.data import DistributedWeightedRandomSampler
-from tests.utils import DistCall, DistTestCase, skip_if_windows
+from tests.utils import DistCall, DistTestCase, skip_if_darwin, skip_if_windows


@skip_if_windows
+@skip_if_darwin
class DistributedWeightedRandomSamplerTest(DistTestCase):
@DistCall(nnodes=1, nproc_per_node=2)
def test_sampling(self):
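skip_if_darwin is imported from tests/utils.py alongside skip_if_windows; assuming it mirrors the existing platform guards there, it is roughly:

import sys
import unittest

def skip_if_darwin(obj):
    # Skip the wrapped test on macOS, where the multi-process
    # distributed backends exercised by this test are unreliable.
    return unittest.skipIf(sys.platform == "darwin", "skipping tests on macOS")(obj)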