From fd6a6b19881cddc11023d0c37e0771252958ce8f Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Mon, 22 Aug 2022 17:46:33 +0800 Subject: [PATCH 1/7] easy-integrate-bundle-v1 Signed-off-by: KumoLiu --- model_zoo/easy_integrate_bundle.py | 190 +++++++++++++++++++++++++++++ 1 file changed, 190 insertions(+) create mode 100644 model_zoo/easy_integrate_bundle.py diff --git a/model_zoo/easy_integrate_bundle.py b/model_zoo/easy_integrate_bundle.py new file mode 100644 index 0000000000..7f636b4722 --- /dev/null +++ b/model_zoo/easy_integrate_bundle.py @@ -0,0 +1,190 @@ +# Copyright (c) MONAI Consortium +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# http://www.apache.org/licenses/LICENSE-2.0 +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +import json +import logging +import os + +import monai.bundle +import torch +from monai.bundle import ConfigParser +from monai.engines import EnsembleEvaluator +from sklearn.model_selection import KFold + +logger = logging.getLogger(__name__) + + +class Const: + CONFIGS = ("train.json", "train.yaml") + MULTI_GPU_CONFIGS = ("multi_gpu_train.json", "multi_gpu_train.yaml") + INFERENCE_CONFIGS = ("inference.json", "inference.yaml") + METADATA_JSON = "metadata.json" + + KEY_DEVICE = "device" + KEY_BUNDLE_ROOT = "bundle_root" + KEY_NETWORK = "network" + KEY_NETWORK_DEF = "network_def" + KEY_DATASET_DIR = "dataset_dir" + KEY_TRAIN_TRAINER_MAX_EPOCHS = "train#trainer#max_epochs" + KEY_TRAIN_DATASET_DATA = "train#dataset#data" + KEY_VALIDATE_DATASET_DATA = "validate#dataset#data" + KEY_INFERENCE_DATASET_DATA = "dataset#data" + KEY_MODEL_PYTORCH = "validate#handlers#-1#key_metric_filename" + KEY_INFERENCE_POSTPROCESSING = "postprocessing" + + +class EnsembleTrainTask(): + def __init__(self, path): + config_paths = [c for c in Const.CONFIGS if os.path.exists(os.path.join(path, "configs", c))] + if not config_paths: + logger.warning(f"Ignore {path} as there is no train config {Const.CONFIGS} exists") + return + + self.bundle_path = path + self.bundle_config_path = os.path.join(path, "configs", config_paths[0]) + + self.bundle_config = ConfigParser() + self.bundle_config.read_config(self.bundle_config_path) + self.bundle_config.update({Const.KEY_BUNDLE_ROOT: self.bundle_path}) + + self.bundle_metadata_path = os.path.join(path, "configs", Const.METADATA_JSON) + + def _partition_datalist(self, datalist, n_splits=5, shuffle=False): + logger.info(f"Total Records in Dataset: {len(datalist)}") + kfold = KFold(n_splits=n_splits, shuffle=shuffle) + + train_datalist, val_datalist = [], [] + for train_idx, valid_idx in kfold.split(datalist): + train_datalist.append([datalist[i] for i in train_idx]) + val_datalist.append([datalist[i] for i in valid_idx]) + + logger.info(f"Total Records for Training: {len(train_datalist[0])}") + logger.info(f"Total Records for Validation: {len(val_datalist[0])}") + return train_datalist, val_datalist + + def _device(self, str): + return torch.device(str if torch.cuda.is_available() else "cpu") + + def ensemble_inference(self, device, test_datalist, ensemble='Mean'): + inference_config_paths = [c for c in Const.INFERENCE_CONFIGS if 
os.path.exists(os.path.join(self.bundle_path, "configs", c))] + if not inference_config_paths: + logger.warning(f"Ignore {self.bundle_path} as there is no inference config {Const.INFERENCE_CONFIGS} exists") + return + + logger.info(f"Total Records in Test Dataset: {len(test_datalist)}") + + bundle_inference_config_path = os.path.join(self.bundle_path, "configs", inference_config_paths[0]) + bundle_inference_config = ConfigParser() + bundle_inference_config.read_config(bundle_inference_config_path) + bundle_inference_config.update({Const.KEY_BUNDLE_ROOT: self.bundle_path}) + bundle_inference_config.update({Const.KEY_INFERENCE_DATASET_DATA: test_datalist}) + + # update postprocessing with mean ensemble or vote ensemble + post_tranform = bundle_inference_config.config['postprocessing'] + ensemble_tranform = { + "_target_": f"{ensemble}Ensembled", + "keys": ["pred", "pred", "pred", "pred", "pred"], + "output_key": "pred" + } + if ensemble == 'Mean': + post_tranform["transforms"].insert(0, ensemble_tranform) + elif ensemble == 'Vote': + post_tranform["transforms"].insert(-1, ensemble_tranform) + else: + raise NotImplementedError + bundle_inference_config.update({Const.KEY_INFERENCE_POSTPROCESSING: post_tranform}) + + # update network weights + _networks = [bundle_inference_config.get_parsed_content("network")]*5 + networks = [] + for i, _network in enumerate(_networks): + _network.load_state_dict(torch.load(self.bundle_path+f"/models/model{i}.pt")) + networks.append(_network) + + evaluator = EnsembleEvaluator( + device=device, + val_data_loader=bundle_inference_config.get_parsed_content("dataloader"), + pred_keys=["pred", "pred", "pred", "pred", "pred"], + networks=networks, + inferer=bundle_inference_config.get_parsed_content("inferer"), + postprocessing=bundle_inference_config.get_parsed_content("postprocessing"), + ) + evaluator.run() + logger.info(f"Inference Finished....") + + def __call__(self, request, datalist, test_datalist=None): + dataset_dir = request.get("dataset_dir", None) + if dataset_dir is None: + logger.warning(f"Ignore dataset dir as there is no dataset dir exists") + return + + train_ds, val_ds = self._partition_datalist(datalist, n_splits=request.get("n_splits", 5)) + fold = 0 + for _train_ds, _val_ds in zip(train_ds, val_ds): + model_pytorch = f'model{fold}.pt' + max_epochs = request.get("max_epochs", 50) + multi_gpu = request.get("multi_gpu", False) + multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False + + gpus = request.get("gpus", "all") + gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")] + logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}") + logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}") + + device = self._device(request.get("device", "cuda:0")) + logger.info(f"Using device: {device}") + + overrides = { + Const.KEY_BUNDLE_ROOT: self.bundle_path, + Const.KEY_TRAIN_TRAINER_MAX_EPOCHS: max_epochs, + Const.KEY_TRAIN_DATASET_DATA: _train_ds, + Const.KEY_VALIDATE_DATASET_DATA: _val_ds, + Const.KEY_DATASET_DIR: dataset_dir, + Const.KEY_MODEL_PYTORCH: model_pytorch, + } + + if multi_gpu: + pass + else: + train_config = ConfigParser() + train_config.read_config(f=self.bundle_config.config) + train_config.update(pairs=overrides) + train_config_path = os.path.join(self.bundle_path, "configs", f"train_fold{fold}.json") + ConfigParser.export_config_file(train_config.config, train_config_path, indent=2) + monai.bundle.run( + "training", + meta_file=self.bundle_metadata_path, + 
config_file=train_config_path, + ) + + logger.info(f"Fold{fold} Training Finished....") + + if test_datalist is not None: + device = self._device(request.get("device", "cuda:0")) + self.ensemble_inference(device, test_datalist, ensemble=request.get("ensemble", "Mean")) + + +if __name__ == '__main__': + request = { + 'dataset_dir': '/workspace/Data/Task09_Spleen', + 'max_epochs': 6, + 'ensemble': "Mean", + 'n_splits': 5 + } + datalist_path = request['dataset_dir']+'/dataset.json' + with open(datalist_path) as fp: + datalist = json.load(fp) + + + train_datalist = [{"image": d["image"].replace('./', f'{request["dataset_dir"]}/'), "label": d["label"].replace('./', f'{request["dataset_dir"]}/')} for d in datalist['training'] if d] + test_datalist = [{"image": d.replace('./', f'{request["dataset_dir"]}/')} for d in datalist['test'] if d] + bundle_root = '/workspace/Code/Bundles/spleen_ct_segmentation' + EnsembleTrainTask = EnsembleTrainTask(bundle_root) + EnsembleTrainTask(request, train_datalist, test_datalist) \ No newline at end of file From 58ce3ca7c8914590a0d39122e48cca789b62d479 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 22 Aug 2022 10:01:02 +0000 Subject: [PATCH 2/7] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- model_zoo/easy_integrate_bundle.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/model_zoo/easy_integrate_bundle.py b/model_zoo/easy_integrate_bundle.py index 7f636b4722..0f6c078f6e 100644 --- a/model_zoo/easy_integrate_bundle.py +++ b/model_zoo/easy_integrate_bundle.py @@ -77,7 +77,7 @@ def ensemble_inference(self, device, test_datalist, ensemble='Mean'): if not inference_config_paths: logger.warning(f"Ignore {self.bundle_path} as there is no inference config {Const.INFERENCE_CONFIGS} exists") return - + logger.info(f"Total Records in Test Dataset: {len(test_datalist)}") bundle_inference_config_path = os.path.join(self.bundle_path, "configs", inference_config_paths[0]) @@ -124,7 +124,7 @@ def __call__(self, request, datalist, test_datalist=None): if dataset_dir is None: logger.warning(f"Ignore dataset dir as there is no dataset dir exists") return - + train_ds, val_ds = self._partition_datalist(datalist, n_splits=request.get("n_splits", 5)) fold = 0 for _train_ds, _val_ds in zip(train_ds, val_ds): @@ -165,7 +165,7 @@ def __call__(self, request, datalist, test_datalist=None): ) logger.info(f"Fold{fold} Training Finished....") - + if test_datalist is not None: device = self._device(request.get("device", "cuda:0")) self.ensemble_inference(device, test_datalist, ensemble=request.get("ensemble", "Mean")) @@ -181,10 +181,10 @@ def __call__(self, request, datalist, test_datalist=None): datalist_path = request['dataset_dir']+'/dataset.json' with open(datalist_path) as fp: datalist = json.load(fp) - + train_datalist = [{"image": d["image"].replace('./', f'{request["dataset_dir"]}/'), "label": d["label"].replace('./', f'{request["dataset_dir"]}/')} for d in datalist['training'] if d] test_datalist = [{"image": d.replace('./', f'{request["dataset_dir"]}/')} for d in datalist['test'] if d] bundle_root = '/workspace/Code/Bundles/spleen_ct_segmentation' EnsembleTrainTask = EnsembleTrainTask(bundle_root) - EnsembleTrainTask(request, train_datalist, test_datalist) \ No newline at end of file + EnsembleTrainTask(request, train_datalist, test_datalist) From 7b69b3ade517ab25c587c7025ee77d6ddd290260 Mon Sep 17 00:00:00 2001 From: KumoLiu 
Date: Tue, 23 Aug 2022 14:00:44 +0800 Subject: [PATCH 3/7] add multigpu implementation Signed-off-by: KumoLiu --- model_zoo/easy_integrate_bundle.py | 66 +++++++++++++++++++++++++----- 1 file changed, 56 insertions(+), 10 deletions(-) diff --git a/model_zoo/easy_integrate_bundle.py b/model_zoo/easy_integrate_bundle.py index 7f636b4722..a883d349c6 100644 --- a/model_zoo/easy_integrate_bundle.py +++ b/model_zoo/easy_integrate_bundle.py @@ -11,6 +11,7 @@ import json import logging import os +import subprocess import monai.bundle import torch @@ -138,7 +139,7 @@ def __call__(self, request, datalist, test_datalist=None): logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}") logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}") - device = self._device(request.get("device", "cuda:0")) + device = self._device(request.get("device", "cuda")) logger.info(f"Using device: {device}") overrides = { @@ -148,35 +149,80 @@ def __call__(self, request, datalist, test_datalist=None): Const.KEY_VALIDATE_DATASET_DATA: _val_ds, Const.KEY_DATASET_DIR: dataset_dir, Const.KEY_MODEL_PYTORCH: model_pytorch, + Const.KEY_DEVICE: device, } if multi_gpu: - pass + config_paths = [ + c for c in Const.MULTI_GPU_CONFIGS if os.path.exists(os.path.join(self.bundle_path, "configs", c)) + ] + if not config_paths: + logger.warning(f"Ignore Multi-GPU Training; No multi-gpu train config {Const.MULTI_GPU_CONFIGS} exists") + return + + train_path = os.path.join(self.bundle_path, "configs", f"train_multigpu_fold{fold}.json") + multi_gpu_train_path = os.path.join(self.bundle_path, "configs", config_paths[0]) + logging_file = os.path.join(self.bundle_path, "configs", "logging.conf") + for k, v in overrides.items(): + if k != Const.KEY_DEVICE: + self.bundle_config.set(v, k) + ConfigParser.export_config_file(self.bundle_config.config, train_path, indent=2) + + env = os.environ.copy() + env["CUDA_VISIBLE_DEVICES"] = ",".join([str(g) for g in gpus]) + logger.info(f"Using CUDA_VISIBLE_DEVICES: {env['CUDA_VISIBLE_DEVICES']}") + cmd = [ + "torchrun", + "--standalone", + "--nnodes=1", + f"--nproc_per_node={len(gpus)}", + "-m", + "monai.bundle", + "run", + "training", + "--meta_file", + self.bundle_metadata_path, + "--config_file", + f"['{train_path}','{multi_gpu_train_path}']", + "--logging_file", + logging_file, + ] + self.run_command(cmd, env) else: - train_config = ConfigParser() - train_config.read_config(f=self.bundle_config.config) - train_config.update(pairs=overrides) - train_config_path = os.path.join(self.bundle_path, "configs", f"train_fold{fold}.json") - ConfigParser.export_config_file(train_config.config, train_config_path, indent=2) monai.bundle.run( "training", meta_file=self.bundle_metadata_path, - config_file=train_config_path, + config_file=self.bundle_config_path, + **overrides, ) + fold += 1 logger.info(f"Fold{fold} Training Finished....") if test_datalist is not None: device = self._device(request.get("device", "cuda:0")) self.ensemble_inference(device, test_datalist, ensemble=request.get("ensemble", "Mean")) + + def run_command(self, cmd, env): + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env) + while process.poll() is None: + line = process.stdout.readline() + line = line.rstrip() + if line: + print(line, flush=True) + + logger.info(f"Return code: {process.returncode}") + process.stdout.close() if __name__ == '__main__': + os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1" request = { 'dataset_dir': '/workspace/Data/Task09_Spleen', 'max_epochs': 6, - 
'ensemble': "Mean", - 'n_splits': 5 + 'ensemble': "Mean", # Mean or Vote + 'n_splits': 5, + 'multi_gpu': True } datalist_path = request['dataset_dir']+'/dataset.json' with open(datalist_path) as fp: From e4fd5444fc2ec784eb4445f862547444a2015fd8 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 Aug 2022 06:06:38 +0000 Subject: [PATCH 4/7] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- model_zoo/easy_integrate_bundle.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/model_zoo/easy_integrate_bundle.py b/model_zoo/easy_integrate_bundle.py index 99cee6ee7c..9a44d8e635 100644 --- a/model_zoo/easy_integrate_bundle.py +++ b/model_zoo/easy_integrate_bundle.py @@ -202,7 +202,7 @@ def __call__(self, request, datalist, test_datalist=None): if test_datalist is not None: device = self._device(request.get("device", "cuda:0")) self.ensemble_inference(device, test_datalist, ensemble=request.get("ensemble", "Mean")) - + def run_command(self, cmd, env): process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env) while process.poll() is None: From dd80bda0231b2429b308e65e7525f4ed9e28c854 Mon Sep 17 00:00:00 2001 From: KumoLiu Date: Sat, 10 Sep 2022 23:14:16 +0800 Subject: [PATCH 5/7] update modules tutorial Signed-off-by: KumoLiu --- model_zoo/easy_integrate_bundle.py | 236 ------------------ modules/3d_image_transforms.ipynb | 14 +- modules/autoencoder_mednist.ipynb | 10 +- modules/batch_output_transform.ipynb | 20 +- .../benchmark_global_mutual_information.ipynb | 4 +- modules/compute_metric.py | 27 +- .../cross_validation_models_ensemble.ipynb | 8 +- modules/dice_loss_metric_notes.ipynb | 12 +- modules/integrate_3rd_party_transforms.ipynb | 8 +- ...ansforms_and_test_time_augmentations.ipynb | 8 +- modules/jupyter_utils.ipynb | 6 +- modules/layer_wise_learning_rate.ipynb | 6 +- modules/learning_rate.ipynb | 6 +- 13 files changed, 60 insertions(+), 305 deletions(-) delete mode 100644 model_zoo/easy_integrate_bundle.py diff --git a/model_zoo/easy_integrate_bundle.py b/model_zoo/easy_integrate_bundle.py deleted file mode 100644 index 9a44d8e635..0000000000 --- a/model_zoo/easy_integrate_bundle.py +++ /dev/null @@ -1,236 +0,0 @@ -# Copyright (c) MONAI Consortium -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# http://www.apache.org/licenses/LICENSE-2.0 -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-import json -import logging -import os -import subprocess - -import monai.bundle -import torch -from monai.bundle import ConfigParser -from monai.engines import EnsembleEvaluator -from sklearn.model_selection import KFold - -logger = logging.getLogger(__name__) - - -class Const: - CONFIGS = ("train.json", "train.yaml") - MULTI_GPU_CONFIGS = ("multi_gpu_train.json", "multi_gpu_train.yaml") - INFERENCE_CONFIGS = ("inference.json", "inference.yaml") - METADATA_JSON = "metadata.json" - - KEY_DEVICE = "device" - KEY_BUNDLE_ROOT = "bundle_root" - KEY_NETWORK = "network" - KEY_NETWORK_DEF = "network_def" - KEY_DATASET_DIR = "dataset_dir" - KEY_TRAIN_TRAINER_MAX_EPOCHS = "train#trainer#max_epochs" - KEY_TRAIN_DATASET_DATA = "train#dataset#data" - KEY_VALIDATE_DATASET_DATA = "validate#dataset#data" - KEY_INFERENCE_DATASET_DATA = "dataset#data" - KEY_MODEL_PYTORCH = "validate#handlers#-1#key_metric_filename" - KEY_INFERENCE_POSTPROCESSING = "postprocessing" - - -class EnsembleTrainTask(): - def __init__(self, path): - config_paths = [c for c in Const.CONFIGS if os.path.exists(os.path.join(path, "configs", c))] - if not config_paths: - logger.warning(f"Ignore {path} as there is no train config {Const.CONFIGS} exists") - return - - self.bundle_path = path - self.bundle_config_path = os.path.join(path, "configs", config_paths[0]) - - self.bundle_config = ConfigParser() - self.bundle_config.read_config(self.bundle_config_path) - self.bundle_config.update({Const.KEY_BUNDLE_ROOT: self.bundle_path}) - - self.bundle_metadata_path = os.path.join(path, "configs", Const.METADATA_JSON) - - def _partition_datalist(self, datalist, n_splits=5, shuffle=False): - logger.info(f"Total Records in Dataset: {len(datalist)}") - kfold = KFold(n_splits=n_splits, shuffle=shuffle) - - train_datalist, val_datalist = [], [] - for train_idx, valid_idx in kfold.split(datalist): - train_datalist.append([datalist[i] for i in train_idx]) - val_datalist.append([datalist[i] for i in valid_idx]) - - logger.info(f"Total Records for Training: {len(train_datalist[0])}") - logger.info(f"Total Records for Validation: {len(val_datalist[0])}") - return train_datalist, val_datalist - - def _device(self, str): - return torch.device(str if torch.cuda.is_available() else "cpu") - - def ensemble_inference(self, device, test_datalist, ensemble='Mean'): - inference_config_paths = [c for c in Const.INFERENCE_CONFIGS if os.path.exists(os.path.join(self.bundle_path, "configs", c))] - if not inference_config_paths: - logger.warning(f"Ignore {self.bundle_path} as there is no inference config {Const.INFERENCE_CONFIGS} exists") - return - - logger.info(f"Total Records in Test Dataset: {len(test_datalist)}") - - bundle_inference_config_path = os.path.join(self.bundle_path, "configs", inference_config_paths[0]) - bundle_inference_config = ConfigParser() - bundle_inference_config.read_config(bundle_inference_config_path) - bundle_inference_config.update({Const.KEY_BUNDLE_ROOT: self.bundle_path}) - bundle_inference_config.update({Const.KEY_INFERENCE_DATASET_DATA: test_datalist}) - - # update postprocessing with mean ensemble or vote ensemble - post_tranform = bundle_inference_config.config['postprocessing'] - ensemble_tranform = { - "_target_": f"{ensemble}Ensembled", - "keys": ["pred", "pred", "pred", "pred", "pred"], - "output_key": "pred" - } - if ensemble == 'Mean': - post_tranform["transforms"].insert(0, ensemble_tranform) - elif ensemble == 'Vote': - post_tranform["transforms"].insert(-1, ensemble_tranform) - else: - raise NotImplementedError - 
bundle_inference_config.update({Const.KEY_INFERENCE_POSTPROCESSING: post_tranform}) - - # update network weights - _networks = [bundle_inference_config.get_parsed_content("network")]*5 - networks = [] - for i, _network in enumerate(_networks): - _network.load_state_dict(torch.load(self.bundle_path+f"/models/model{i}.pt")) - networks.append(_network) - - evaluator = EnsembleEvaluator( - device=device, - val_data_loader=bundle_inference_config.get_parsed_content("dataloader"), - pred_keys=["pred", "pred", "pred", "pred", "pred"], - networks=networks, - inferer=bundle_inference_config.get_parsed_content("inferer"), - postprocessing=bundle_inference_config.get_parsed_content("postprocessing"), - ) - evaluator.run() - logger.info(f"Inference Finished....") - - def __call__(self, request, datalist, test_datalist=None): - dataset_dir = request.get("dataset_dir", None) - if dataset_dir is None: - logger.warning(f"Ignore dataset dir as there is no dataset dir exists") - return - - train_ds, val_ds = self._partition_datalist(datalist, n_splits=request.get("n_splits", 5)) - fold = 0 - for _train_ds, _val_ds in zip(train_ds, val_ds): - model_pytorch = f'model{fold}.pt' - max_epochs = request.get("max_epochs", 50) - multi_gpu = request.get("multi_gpu", False) - multi_gpu = multi_gpu if torch.cuda.device_count() > 1 else False - - gpus = request.get("gpus", "all") - gpus = list(range(torch.cuda.device_count())) if gpus == "all" else [int(g) for g in gpus.split(",")] - logger.info(f"Using Multi GPU: {multi_gpu}; GPUS: {gpus}") - logger.info(f"CUDA_VISIBLE_DEVICES: {os.environ.get('CUDA_VISIBLE_DEVICES')}") - - device = self._device(request.get("device", "cuda")) - logger.info(f"Using device: {device}") - - overrides = { - Const.KEY_BUNDLE_ROOT: self.bundle_path, - Const.KEY_TRAIN_TRAINER_MAX_EPOCHS: max_epochs, - Const.KEY_TRAIN_DATASET_DATA: _train_ds, - Const.KEY_VALIDATE_DATASET_DATA: _val_ds, - Const.KEY_DATASET_DIR: dataset_dir, - Const.KEY_MODEL_PYTORCH: model_pytorch, - Const.KEY_DEVICE: device, - } - - if multi_gpu: - config_paths = [ - c for c in Const.MULTI_GPU_CONFIGS if os.path.exists(os.path.join(self.bundle_path, "configs", c)) - ] - if not config_paths: - logger.warning(f"Ignore Multi-GPU Training; No multi-gpu train config {Const.MULTI_GPU_CONFIGS} exists") - return - - train_path = os.path.join(self.bundle_path, "configs", f"train_multigpu_fold{fold}.json") - multi_gpu_train_path = os.path.join(self.bundle_path, "configs", config_paths[0]) - logging_file = os.path.join(self.bundle_path, "configs", "logging.conf") - for k, v in overrides.items(): - if k != Const.KEY_DEVICE: - self.bundle_config.set(v, k) - ConfigParser.export_config_file(self.bundle_config.config, train_path, indent=2) - - env = os.environ.copy() - env["CUDA_VISIBLE_DEVICES"] = ",".join([str(g) for g in gpus]) - logger.info(f"Using CUDA_VISIBLE_DEVICES: {env['CUDA_VISIBLE_DEVICES']}") - cmd = [ - "torchrun", - "--standalone", - "--nnodes=1", - f"--nproc_per_node={len(gpus)}", - "-m", - "monai.bundle", - "run", - "training", - "--meta_file", - self.bundle_metadata_path, - "--config_file", - f"['{train_path}','{multi_gpu_train_path}']", - "--logging_file", - logging_file, - ] - self.run_command(cmd, env) - else: - monai.bundle.run( - "training", - meta_file=self.bundle_metadata_path, - config_file=self.bundle_config_path, - **overrides, - ) - fold += 1 - - logger.info(f"Fold{fold} Training Finished....") - - if test_datalist is not None: - device = self._device(request.get("device", "cuda:0")) - 
self.ensemble_inference(device, test_datalist, ensemble=request.get("ensemble", "Mean")) - - def run_command(self, cmd, env): - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, universal_newlines=True, env=env) - while process.poll() is None: - line = process.stdout.readline() - line = line.rstrip() - if line: - print(line, flush=True) - - logger.info(f"Return code: {process.returncode}") - process.stdout.close() - - -if __name__ == '__main__': - os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1" - request = { - 'dataset_dir': '/workspace/Data/Task09_Spleen', - 'max_epochs': 6, - 'ensemble': "Mean", # Mean or Vote - 'n_splits': 5, - 'multi_gpu': True - } - datalist_path = request['dataset_dir']+'/dataset.json' - with open(datalist_path) as fp: - datalist = json.load(fp) - - - train_datalist = [{"image": d["image"].replace('./', f'{request["dataset_dir"]}/'), "label": d["label"].replace('./', f'{request["dataset_dir"]}/')} for d in datalist['training'] if d] - test_datalist = [{"image": d.replace('./', f'{request["dataset_dir"]}/')} for d in datalist['test'] if d] - bundle_root = '/workspace/Code/Bundles/spleen_ct_segmentation' - EnsembleTrainTask = EnsembleTrainTask(bundle_root) - EnsembleTrainTask(request, train_datalist, test_datalist) diff --git a/modules/3d_image_transforms.ipynb b/modules/3d_image_transforms.ipynb index 4bf751ee92..b5134b229c 100644 --- a/modules/3d_image_transforms.ipynb +++ b/modules/3d_image_transforms.ipynb @@ -37,7 +37,7 @@ "outputs": [], "source": [ "from monai.transforms import (\n", - " AddChanneld,\n", + " EnsureChannelFirstd,\n", " LoadImage,\n", " LoadImaged,\n", " Orientationd,\n", @@ -390,7 +390,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Add the channel dimension\n", + "## Ensure the first dimension is channel\n", "\n", "Most of MONAI's image transformations assume that the input data has the shape: \n", "`[num_channels, spatial_dim_1, spatial_dim_2, ... ,spatial_dim_n]` \n", @@ -415,8 +415,8 @@ } ], "source": [ - "add_channel = AddChanneld(keys=[\"image\", \"label\"])\n", - "datac_dict = add_channel(data_dict)\n", + "ensure_channel_first = EnsureChannelFirstd(keys=[\"image\", \"label\"])\n", + "datac_dict = ensure_channel_first(data_dict)\n", "print(f\"image shape: {datac_dict['image'].shape}\")" ] }, @@ -519,7 +519,7 @@ "\n", "The input volumes might have different voxel sizes. \n", "The following transform is created to normalise the volumes to have (1.5, 1.5, 5.) millimetre voxel size. \n", - "The transform is set to read the original voxel size information from `data_dict['image.affine']`, \n", + "The transform is set to read the original voxel size information from `data_dict['image`].affine`, \n", "which is from the corresponding NIfTI file, loaded earlier by `LoadImaged`." ] }, @@ -574,7 +574,7 @@ "metadata": {}, "source": [ "To track the spacing changes, the data_dict was updated by `Spacingd`:\n", - "* An `image.original_affine` key is added to the `data_dict`, logs the original affine.\n", + "* An `image.meta['original_affine']` key is added to the `data_dict`, logs the original affine.\n", "* An `image.affine` key is updated to have the current affine." 
] }, @@ -829,7 +829,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/autoencoder_mednist.ipynb b/modules/autoencoder_mednist.ipynb index 1067f1e6b5..ebef31f7bc 100644 --- a/modules/autoencoder_mednist.ipynb +++ b/modules/autoencoder_mednist.ipynb @@ -96,7 +96,7 @@ "from monai.data import CacheDataset, DataLoader\n", "from monai.networks.nets import AutoEncoder\n", "from monai.transforms import (\n", - " AddChannelD,\n", + " EnsureChannelFirstD,\n", " Compose,\n", " LoadImageD,\n", " RandFlipD,\n", @@ -278,7 +278,7 @@ "train_transforms = Compose(\n", " [\n", " LoadImageD(keys=[\"im\"]),\n", - " AddChannelD(keys=[\"im\"]),\n", + " EnsureChannelFirstD(keys=[\"im\"]),\n", " ScaleIntensityD(keys=[\"im\"]),\n", " RandRotateD(keys=[\"im\"], range_x=np.pi / 12, prob=0.5, keep_size=True),\n", " RandFlipD(keys=[\"im\"], spatial_axis=0, prob=0.5),\n", @@ -291,7 +291,7 @@ "test_transforms = Compose(\n", " [\n", " LoadImageD(keys=[\"im\"]),\n", - " AddChannelD(keys=[\"im\"]),\n", + " EnsureChannelFirstD(keys=[\"im\"]),\n", " ScaleIntensityD(keys=[\"im\"]),\n", " EnsureTypeD(keys=[\"im\"]),\n", " NoiseLambda,\n", @@ -510,7 +510,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, @@ -524,7 +524,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.7.10" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/batch_output_transform.ipynb b/modules/batch_output_transform.ipynb index d4cc49f1d0..2427a96b6d 100644 --- a/modules/batch_output_transform.ipynb +++ b/modules/batch_output_transform.ipynb @@ -94,7 +94,7 @@ "from monai.networks.nets import UNet\n", "from monai.transforms import (\n", " Activationsd,\n", - " AsChannelFirstd,\n", + " EnsureChannelFirstd,\n", " AsDiscreted,\n", " Compose,\n", " KeepLargestConnectedComponentd,\n", @@ -119,7 +119,7 @@ "First of all, let's take a look at the possible data shape in `engine.state.batch` and `engine.state.output`.\n", "\n", "### engine.state.batch\n", - "(1) For a common ignite program, `batch` is usually the iterable output of PyTorch DataLoader, for example: `{\"image\": Tensor, \"label\" Tensor, \"image_meta_dict\": Dict}` where `image` and `label` are batch-first arrays, `image_meta_dict` is a dictionary of meta information for the input images, every item is a batch:\n", + "(1) For a common ignite program, `batch` is usually the iterable output of PyTorch DataLoader, for example: `{\"image\": MetaTensor, \"label\" MetaTensor, \"image_meta_dict\": Dict}` where `image` and `label` are batch-first arrays, `image_meta_dict` is a dictionary of meta information for the input images, every item is a batch:\n", "```\n", "image.shape = [2, 4, 64, 64, 64] # here 2 is batch size, 4 is channels\n", "label.shape = [2, 3, 64, 64, 64]\n", @@ -129,7 +129,7 @@ "(2) For MONAI engines, it will automatically `decollate` the batch data into a list of `channel-first` data after every iteration. 
For more details about `decollate`, please refer to: https://github.com/Project-MONAI/tutorials/blob/main/modules/decollate_batch.ipynb.\n", "\n", "The `engine.state.batch` example in (1) will be decollated into a list of dictionaries:\n", - "`[{\"image\": Tensor, \"label\" Tensor, \"image_meta_dict\": Dict}, {\"image\": Tensor, \"label\" Tensor, \"image_meta_dict\": Dict}]`.\n", + "`[{\"image\": MetaTensor, \"label\" MetaTensor, \"image_meta_dict\": Dict}, {\"image\": MetaTensor, \"label\" MetaTensor, \"image_meta_dict\": Dict}]`.\n", "\n", "each item of the list can be:\n", "```\n", @@ -139,7 +139,7 @@ "```\n", "\n", "### engine.state.output\n", - "(1) For a common ignite program, `output` is usually the output data of current iteration, for example: `{\"pred\": Tensor, \"label\": Tensor, \"loss\": scalar}` where `pred` and `label` are batch-first arrays, `loss` is a scalar value of current iteration:\n", + "(1) For a common ignite program, `output` is usually the output data of current iteration, for example: `{\"pred\": MetaTensor, \"label\": MetaTensor, \"loss\": scalar}` where `pred` and `label` are batch-first arrays, `loss` is a scalar value of current iteration:\n", "```\n", "pred.shape = [2, 3, 64, 64, 64] # here 2 is batch size, 3 is channels\n", "label.shape = [2, 3, 64, 64, 64]\n", @@ -148,7 +148,7 @@ "\n", "(2) For MONAI engines, it will also automatically `decollate` the output data into a list of `channel-first` data after every iteration.\n", "The `engine.state.output` example in (1) will be decollated into a list of dictionaries:\n", - "`[{\"pred\": Tensor, \"label\": Tensor, \"loss\" 0.4534}, {\"pred\": Tensor, \"label\": Tensor, \"loss\" 0.4534}]`. Please note that it replicated the scalar value of `loss` to every item of the decollated list." + "`[{\"pred\": MetaTensor, \"label\": MetaTensor, \"loss\" 0.4534}, {\"pred\": MetaTensor, \"label\": MetaTensor, \"loss\" 0.4534}]`. Please note that it replicated the scalar value of `loss` to every item of the decollated list." ] }, { @@ -159,9 +159,9 @@ "\n", "Now let's analyze the cases of extracting data from `engine.state.batch` or `engine.state.output`. To simplify the operation, we developed a utility function `monai.handlers.from_engine` to automatically handle all the common cases.\n", "\n", - "(1) To get the meta data from dictionary format `engine.state.batch`, set arg `batch_transform=lambda x: x[\"image_meta_dict\"]`.\n", + "(1) To get the meta data from dictionary format `engine.state.batch`, set arg `batch_transform=lambda x: x.meta`.\n", "\n", - "(2) To get the meta data from decollated list of dictionaries `engine.state.batch`, set arg `lambda x: [i[\"image_meta_dict\"] for i in x]` or `from_engine(\"image_meta_dict\")`.\n", + "(2) To get the meta data from decollated list of dictionaries `engine.state.batch`, set arg `lambda x: [i.meta for i in x]` or `from_engine(\"image_meta_dict\")`.\n", "\n", "(3) Metrics usually expect a `Tuple(pred, label)` input, if `engine.state.output` is a dictionary, set arg `output_transform=lambda x: (x[\"pred\"], x[\"label\"])`. 
If decollated list, set arg `lambda x: ([i[\"pred\"] for i in x], [i[\"label\"] for i in x])` or `from_engine([\"pred\", \"label\"])`.\n", "\n", @@ -244,7 +244,7 @@ "train_transforms = Compose(\n", " [\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", - " AsChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", + " EnsureChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", " ScaleIntensityd(keys=\"image\"),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"], label_key=\"label\", spatial_size=[96, 96, 96], pos=1, neg=1, num_samples=4\n", @@ -255,7 +255,7 @@ "val_transforms = Compose(\n", " [\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", - " AsChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", + " EnsureChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", " ScaleIntensityd(keys=\"image\"),\n", " EnsureTyped(keys=[\"image\", \"label\"]),\n", " ]\n", @@ -425,7 +425,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/benchmark_global_mutual_information.ipynb b/modules/benchmark_global_mutual_information.ipynb index c91e9ce2b4..b9050da271 100644 --- a/modules/benchmark_global_mutual_information.ipynb +++ b/modules/benchmark_global_mutual_information.ipynb @@ -74,7 +74,7 @@ "outputs": [], "source": [ "!python -c \"import monai\" || pip install -q \"monai-weekly[nibabel]\"\n", - "!python -c \"import ants\" || pip install -q antspyx==0.2.9\n", + "!python -c \"import ants\" || pip install -q antspyx==0.3.1\n", "!python -c \"import plotly\" || pip install -q plotly==5.3" ] }, @@ -470,7 +470,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/compute_metric.py b/modules/compute_metric.py index 2256b19639..492f221b41 100644 --- a/modules/compute_metric.py +++ b/modules/compute_metric.py @@ -15,25 +15,22 @@ It can even run on multi-nodes. Main steps to set up the distributed data parallel: -- Execute `torch.distributed.launch` to create processes on every node for every process. +- Execute `torchrun` to create processes on every node for every process. It receives parameters as below: `--nproc_per_node=NUM_PROCESSES_PER_NODE` `--nnodes=NUM_NODES` - `--node_rank=INDEX_CURRENT_NODE` - `--master_addr="localhost"` - `--master_port=1234` - For more details, refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py. + For more details, refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py. Alternatively, we can also use `torch.multiprocessing.spawn` to start program, but it that case, need to handle all the above parameters and compute `rank` manually, then set to `init_process_group`, etc. - `torch.distributed.launch` is even more efficient than `torch.multiprocessing.spawn`. + `torchrun` is even more efficient than `torch.multiprocessing.spawn`. - Use `init_process_group` to initialize every process. - Partition the saved predictions and labels into ranks for parallel computation. - Compute `Dice Metric` on every process, reduce the results after synchronization. Note: - `torch.distributed.launch` will launch `nnodes * nproc_per_node = world_size` processes in total. + `torchrun` will launch `nnodes * nproc_per_node = world_size` processes in total. 
Example script to execute this program on a single node with 2 processes: - `python -m torch.distributed.launch --nproc_per_node=2 compute_metric.py` + `torchrun --nproc_per_node=2 compute_metric.py` Referring to: https://pytorch.org/tutorials/intermediate/ddp_tutorial.html @@ -65,7 +62,8 @@ def compute(args): # generate synthetic data for the example - if args.local_rank == 0 and not os.path.exists(args.dir): + local_rank = int(os.environ["LOCAL_RANK"]) + if local_rank == 0 and not os.path.exists(args.dir): # create 16 random pred, label paris for evaluation print(f"generating synthetic data to {args.dir} (this may take a while)") os.makedirs(args.dir) @@ -93,7 +91,7 @@ def compute(args): even_divisible=False, )[dist.get_rank()] - device = torch.device(f"cuda:{args.local_rank}") + device = torch.device(f"cuda:{local_rank}") torch.cuda.set_device(device) # define transforms for predictions and labels transforms = Compose( @@ -116,7 +114,7 @@ def compute(args): result = metric.aggregate().item() filenames = string_list_all_gather(strings=filenames) - if args.local_rank == 0: + if local_rank == 0: print("mean dice: ", result) # generate metrics reports at: output/mean_dice_raw.csv, output/mean_dice_summary.csv, output/metrics.csv write_metrics_reports( @@ -134,16 +132,11 @@ def compute(args): # usage example(refer to https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py): -# python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_PER_NODE -# --nnodes=NUM_NODES --node_rank=INDEX_CURRENT_NODE -# --master_addr="192.168.1.1" --master_port=1234 -# compute_metric.py -d DIR_OF_OUTPUT +# torchrun --standalone --nnodes=NUM_NODES --nproc_per_node=NUM_GPUS_PER_NODE compute_metric.py -d DIR_OF_OUTPUT def main(): parser = argparse.ArgumentParser() parser.add_argument("-d", "--dir", default="./output", type=str, help="root directory of labels and predictions.") - # must parse the command-line argument: ``--local_rank=LOCAL_PROCESS_RANK``, which will be provided by DDP - parser.add_argument("--local_rank", type=int) args = parser.parse_args() compute(args=args) diff --git a/modules/cross_validation_models_ensemble.ipynb b/modules/cross_validation_models_ensemble.ipynb index 6c117b3412..195bc42bc2 100644 --- a/modules/cross_validation_models_ensemble.ipynb +++ b/modules/cross_validation_models_ensemble.ipynb @@ -94,7 +94,7 @@ "from monai.networks.nets import UNet\n", "from monai.transforms import (\n", " Activationsd,\n", - " AsChannelFirstd,\n", + " EnsureChannelFirstd,\n", " AsDiscreted,\n", " Compose,\n", " LoadImaged,\n", @@ -249,7 +249,7 @@ "train_transforms = Compose(\n", " [\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", - " AsChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", + " EnsureChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", " ScaleIntensityd(keys=[\"image\", \"label\"]),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"],\n", @@ -266,7 +266,7 @@ "val_transforms = Compose(\n", " [\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", - " AsChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", + " EnsureChannelFirstd(keys=[\"image\", \"label\"], channel_dim=-1),\n", " ScaleIntensityd(keys=[\"image\", \"label\"]),\n", " EnsureTyped(keys=[\"image\", \"label\"]),\n", " ]\n", @@ -588,7 +588,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/dice_loss_metric_notes.ipynb 
b/modules/dice_loss_metric_notes.ipynb index 92e53b0990..3d626a6adc 100644 --- a/modules/dice_loss_metric_notes.ipynb +++ b/modules/dice_loss_metric_notes.ipynb @@ -20,7 +20,7 @@ "import torch\n", "from monai.losses import DiceLoss\n", "from monai.metrics import DiceMetric\n", - "from monai.transforms import AddChannel, AsDiscrete, Compose\n", + "from monai.transforms import EnsureChannelFirst, AsDiscrete, Compose\n", "\n", "\n", "def print_tensor(name, t):\n", @@ -108,7 +108,7 @@ "outputs": [], "source": [ "# make one hot and add batch dimension\n", - "make_2_class = Compose([AsDiscrete(to_onehot=2), AddChannel()])\n", + "make_2_class = Compose([AsDiscrete(to_onehot=2), EnsureChannelFirst()])\n", "\n", "grnd2 = make_2_class(grnd)\n", "pred2 = make_2_class(pred)" @@ -305,7 +305,7 @@ "outputs": [], "source": [ "# make one hot and add batch dimension\n", - "make_3_class = Compose([AsDiscrete(to_onehot=3), AddChannel()])\n", + "make_3_class = Compose([AsDiscrete(to_onehot=3), EnsureChannelFirst()])\n", "\n", "mgrnd2 = make_3_class(mgrnd)\n", "mpred2 = make_3_class(mpred)" @@ -398,9 +398,9 @@ ], "metadata": { "kernelspec": { - "display_name": "Python [conda env:monai]", + "display_name": "Python 3 (ipykernel)", "language": "python", - "name": "conda-env-monai-py" + "name": "python3" }, "language_info": { "codemirror_mode": { @@ -412,7 +412,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/integrate_3rd_party_transforms.ipynb b/modules/integrate_3rd_party_transforms.ipynb index 62bc932433..9a161e5156 100644 --- a/modules/integrate_3rd_party_transforms.ipynb +++ b/modules/integrate_3rd_party_transforms.ipynb @@ -56,7 +56,7 @@ "source": [ "from monai.utils import first, set_determinism\n", "from monai.transforms import (\n", - " AddChanneld,\n", + " EnsureChannelFirstd,\n", " Compose,\n", " CropForegroundd,\n", " LoadImaged,\n", @@ -265,7 +265,7 @@ "source": [ "monai_transforms = [\n", " LoadImaged(keys=[\"image\", \"label\"]),\n", - " AddChanneld(keys=[\"image\", \"label\"]),\n", + " EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(\n", " 1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", @@ -388,7 +388,7 @@ " itk_transforms,\n", " # add another dim as BatchGenerator and\n", " # Rising expects shape [B, C, H, W, D]\n", - " AddChanneld(keys=[\"image\", \"label\"]),\n", + " EnsureChannelFirstd(keys=[\"image\", \"label\"]),\n", " adaptor(batch_generator_transforms, {\"image\": \"image\"}),\n", " EnsureTyped(keys=[\"image\", \"label\"]),\n", " adaptor(rising_transforms, {\"image\": \"image\", \"label\": \"label\"}),\n", @@ -487,7 +487,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/inverse_transforms_and_test_time_augmentations.ipynb b/modules/inverse_transforms_and_test_time_augmentations.ipynb index a7d10be765..f7627ed9f5 100644 --- a/modules/inverse_transforms_and_test_time_augmentations.ipynb +++ b/modules/inverse_transforms_and_test_time_augmentations.ipynb @@ -131,8 +131,7 @@ "from monai.networks.nets import UNet\n", "from monai.transforms import (\n", " Activations,\n", - " AddChanneld,\n", - " AsChannelFirstd,\n", + " EnsureChannelFirstd,\n", " AsDiscrete,\n", " Compose,\n", " CropForegroundd,\n", @@ -256,8 +255,7 @@ " 
os.makedirs(os.path.join(data_dir, key), exist_ok=True)\n", "transform_2d_slice = Compose([\n", " LoadImaged(keys),\n", - " AsChannelFirstd(\"image\"),\n", - " AddChanneld(\"label\"),\n", + " EnsureChannelFirstd(keys),\n", " SliceWithMaxNumLabelsd(keys, \"label\"),\n", " SaveSliced(keys, data_dir),\n", "])\n", @@ -789,7 +787,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.0" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/jupyter_utils.ipynb b/modules/jupyter_utils.ipynb index 03c2e443b5..f80450a2d3 100644 --- a/modules/jupyter_utils.ipynb +++ b/modules/jupyter_utils.ipynb @@ -82,7 +82,7 @@ "from monai.data import Dataset, DataLoader, create_test_image_2d\n", "from monai.losses import DiceLoss\n", "from monai.networks.nets import UNet\n", - "from monai.transforms import AddChanneld, Compose, EnsureTyped, AsDiscreted\n", + "from monai.transforms import EnsureChannelFirstd, Compose, EnsureTyped, AsDiscreted\n", "from monai.utils import ThreadContainer\n", "from monai.engines import SupervisedTrainer, SupervisedEvaluator\n", "from monai.utils.enums import CommonKeys\n", @@ -114,7 +114,7 @@ " im, seg = create_test_image_2d(256, 256, num_seg_classes=1, noise_max=0.75, random_state=rs)\n", " data.append({keys[0]: im, keys[1]: seg})\n", "\n", - "trans = Compose([AddChanneld(keys=keys), EnsureTyped(keys=keys)])\n", + "trans = Compose([EnsureChannelFirstd(keys=keys), EnsureTyped(keys=keys)])\n", "\n", "train_ds = Dataset(data[:240], trans)\n", "val_ds = Dataset(data[240:], trans)\n", @@ -301,7 +301,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/layer_wise_learning_rate.ipynb b/modules/layer_wise_learning_rate.ipynb index 549cb29dc4..651c83021c 100644 --- a/modules/layer_wise_learning_rate.ipynb +++ b/modules/layer_wise_learning_rate.ipynb @@ -50,7 +50,7 @@ "outputs": [], "source": [ "from monai.transforms import (\n", - " AddChanneld,\n", + " EnsureChannelFirstd,\n", " Compose,\n", " LoadImaged,\n", " ScaleIntensityd,\n", @@ -201,7 +201,7 @@ "transform = Compose(\n", " [\n", " LoadImaged(keys=\"image\"),\n", - " AddChanneld(keys=\"image\"),\n", + " EnsureChannelFirstd(keys=\"image\"),\n", " ScaleIntensityd(keys=\"image\"),\n", " EnsureTyped(keys=\"image\"),\n", " ]\n", @@ -1077,7 +1077,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.10" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/modules/learning_rate.ipynb b/modules/learning_rate.ipynb index 0fc1ab802a..6c229678d4 100644 --- a/modules/learning_rate.ipynb +++ b/modules/learning_rate.ipynb @@ -106,7 +106,7 @@ "from monai.transforms import (\n", " Activations,\n", " AsDiscrete,\n", - " AddChanneld,\n", + " EnsureChannelFirstd,\n", " CenterSpatialCropd,\n", " Compose,\n", " LoadImaged,\n", @@ -177,7 +177,7 @@ "transforms = Compose(\n", " [\n", " LoadImaged(keys=\"image\"),\n", - " AddChanneld(keys=\"image\"),\n", + " EnsureChannelFirstd(keys=\"image\"),\n", " ScaleIntensityd(keys=\"image\"),\n", " CenterSpatialCropd(keys=\"image\", roi_size=(20, 20)),\n", " EnsureTyped(keys=\"image\"),\n", @@ -1576,7 +1576,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.12" + "version": "3.8.13" } }, "nbformat": 4, From 8aab3f5b4ffc29a4b566913c6f95639e741e209d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" 
<66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 10 Sep 2022 15:16:33 +0000 Subject: [PATCH 6/7] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- modules/compute_metric.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/compute_metric.py b/modules/compute_metric.py index 492f221b41..15615742fa 100644 --- a/modules/compute_metric.py +++ b/modules/compute_metric.py @@ -62,7 +62,7 @@ def compute(args): # generate synthetic data for the example - local_rank = int(os.environ["LOCAL_RANK"]) + local_rank = int(os.environ["LOCAL_RANK"]) if local_rank == 0 and not os.path.exists(args.dir): # create 16 random pred, label paris for evaluation print(f"generating synthetic data to {args.dir} (this may take a while)") From fb93cb7214fdfb294e8479e6b915f83728209646 Mon Sep 17 00:00:00 2001 From: Wenqi Li <831580+wyli@users.noreply.github.com> Date: Sun, 11 Sep 2022 20:07:29 +0100 Subject: [PATCH 7/7] Update modules/3d_image_transforms.ipynb --- modules/3d_image_transforms.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/3d_image_transforms.ipynb b/modules/3d_image_transforms.ipynb index b5134b229c..e8692bce6b 100644 --- a/modules/3d_image_transforms.ipynb +++ b/modules/3d_image_transforms.ipynb @@ -519,7 +519,7 @@ "\n", "The input volumes might have different voxel sizes. \n", "The following transform is created to normalise the volumes to have (1.5, 1.5, 5.) millimetre voxel size. \n", - "The transform is set to read the original voxel size information from `data_dict['image`].affine`, \n", + "The transform is set to read the original voxel size information from `data_dict['image'].affine`, \n", "which is from the corresponding NIfTI file, loaded earlier by `LoadImaged`." ] },
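The thread running through this series — the torchrun command assembled in patch 3 and the compute_metric.py migration in patch 5 — applies one pattern: torchrun exports LOCAL_RANK (together with RANK and WORLD_SIZE) into each worker's environment, so a script reads its rank from os.environ rather than accepting the --local_rank flag that torch.distributed.launch used to inject. Below is a minimal sketch of that launch pattern, assuming a CUDA-capable host; the all_reduce call is only an illustrative connectivity check and is not part of these patches:

import os

import torch
import torch.distributed as dist


def main():
    # torchrun sets LOCAL_RANK per worker; no --local_rank argparse flag is needed
    local_rank = int(os.environ["LOCAL_RANK"])
    dist.init_process_group(backend="nccl", init_method="env://")
    device = torch.device(f"cuda:{local_rank}")
    torch.cuda.set_device(device)

    # each rank does its share of work here (e.g. metric computation);
    # summing a ones-tensor across ranks is just a sanity check that
    # every process joined the group
    marker = torch.ones(1, device=device)
    dist.all_reduce(marker)
    if local_rank == 0:
        print(f"world size seen via all_reduce: {int(marker.item())}")

    dist.destroy_process_group()


if __name__ == "__main__":
    main()

A script written this way is launched with, e.g., torchrun --standalone --nnodes=1 --nproc_per_node=2 compute_metric.py — the same flags patch 3 passes when it shells out to torchrun for multi-GPU bundle training.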