diff --git a/2d_classification/mednist_tutorial.ipynb b/2d_classification/mednist_tutorial.ipynb
index f4bd4339ef..560f5c4883 100644
--- a/2d_classification/mednist_tutorial.ipynb
+++ b/2d_classification/mednist_tutorial.ipynb
@@ -375,7 +375,7 @@
     "    [LoadImage(image_only=True), AddChannel(), ScaleIntensity(), EnsureType()])\n",
     "\n",
     "y_pred_trans = Compose([EnsureType(), Activations(softmax=True)])\n",
-    "y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=num_class)])"
+    "y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=num_class)])"
    ]
   },
   {
diff --git a/3d_classification/ignite/densenet_training_dict.py b/3d_classification/ignite/densenet_training_dict.py
index b2d8de5d9d..4a61fe5144 100644
--- a/3d_classification/ignite/densenet_training_dict.py
+++ b/3d_classification/ignite/densenet_training_dict.py
@@ -126,7 +126,7 @@ def prepare_batch(batch, device=None, non_blocking=False):
 
     # add evaluation metric to the evaluator engine
     val_metrics = {metric_name: ROCAUC()}
-    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])
+    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])
     post_pred = Compose([EnsureType(), Activations(softmax=True)])
     # Ignite evaluator expects batch=(img, label) and returns output=(y_pred, y) at every iteration,
     # user can add output_transform to return other values
diff --git a/3d_classification/torch/densenet_training_dict.py b/3d_classification/torch/densenet_training_dict.py
index b16bf09f45..54ead7867f 100644
--- a/3d_classification/torch/densenet_training_dict.py
+++ b/3d_classification/torch/densenet_training_dict.py
@@ -81,7 +81,7 @@ def main():
         ]
     )
     post_pred = Compose([EnsureType(), Activations(softmax=True)])
-    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])
+    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])
 
     # Define dataset, data loader
     check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
diff --git a/3d_segmentation/challenge_baseline/run_net.py b/3d_segmentation/challenge_baseline/run_net.py
index 00eb90f546..c0e5e6c82e 100644
--- a/3d_segmentation/challenge_baseline/run_net.py
+++ b/3d_segmentation/challenge_baseline/run_net.py
@@ -81,11 +81,11 @@ def get_xforms(mode="train", keys=("image", "label")):
 
 def get_net():
     """returns a unet model instance."""
-    n_classes = 2
+    num_classes = 2
     net = monai.networks.nets.BasicUNet(
         dimensions=3,
         in_channels=1,
-        out_channels=n_classes,
+        out_channels=num_classes,
         features=(32, 32, 64, 128, 256, 32),
         dropout=0.1,
     )
@@ -172,7 +172,7 @@ def train(data_folder=".", model_folder="runs"):
 
     # create evaluator (to be used to measure model quality during training
     val_post_transform = monai.transforms.Compose(
-        [EnsureTyped(keys=("pred", "label")), AsDiscreted(keys=("pred", "label"), argmax=(True, False), to_onehot=True, n_classes=2)]
+        [EnsureTyped(keys=("pred", "label")), AsDiscreted(keys=("pred", "label"), argmax=(True, False), to_onehot=True, num_classes=2)]
     )
     val_handlers = [
         ProgressBar(),
diff --git a/3d_segmentation/spleen_segmentation_3d.ipynb b/3d_segmentation/spleen_segmentation_3d.ipynb
index 08d882fdb7..75bf23f2bc 100644
--- a/3d_segmentation/spleen_segmentation_3d.ipynb
+++ b/3d_segmentation/spleen_segmentation_3d.ipynb
@@ -471,8 +471,8 @@
     "best_metric_epoch = -1\n",
     "epoch_loss_values = []\n",
     "metric_values = []\n",
-    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "\n",
     "for epoch in range(max_epochs):\n",
     "    print(\"-\" * 10)\n",
@@ -720,8 +720,8 @@
     "        nearest_interp=False,\n",
     "        to_tensor=True,\n",
     "    ),\n",
-    "    AsDiscreted(keys=\"pred\", argmax=True, to_onehot=True, n_classes=2),\n",
-    "    AsDiscreted(keys=\"label\", to_onehot=True, n_classes=2),\n",
+    "    AsDiscreted(keys=\"pred\", argmax=True, to_onehot=True, num_classes=2),\n",
+    "    AsDiscreted(keys=\"label\", to_onehot=True, num_classes=2),\n",
     "])"
    ]
   },
diff --git a/3d_segmentation/spleen_segmentation_3d_lightning.ipynb b/3d_segmentation/spleen_segmentation_3d_lightning.ipynb
index 225e1929c6..51e84c959c 100644
--- a/3d_segmentation/spleen_segmentation_3d_lightning.ipynb
+++ b/3d_segmentation/spleen_segmentation_3d_lightning.ipynb
@@ -241,8 +241,8 @@
     "            norm=Norm.BATCH,\n",
     "        )\n",
     "        self.loss_function = DiceLoss(to_onehot_y=True, softmax=True)\n",
-    "        self.post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "        self.post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "        self.post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "        self.post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "        self.dice_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n",
     "        self.best_val_dice = 0\n",
     "        self.best_val_epoch = 0\n",
diff --git a/3d_segmentation/unetr_btcv_segmentation_3d.ipynb b/3d_segmentation/unetr_btcv_segmentation_3d.ipynb
index 79590e8d5d..1ef082246b 100644
--- a/3d_segmentation/unetr_btcv_segmentation_3d.ipynb
+++ b/3d_segmentation/unetr_btcv_segmentation_3d.ipynb
@@ -681,8 +681,8 @@
     "\n",
     "max_iterations = 25000\n",
     "eval_num = 500\n",
-    "post_label = AsDiscrete(to_onehot=True, n_classes=14)\n",
-    "post_pred = AsDiscrete(argmax=True, to_onehot=True, n_classes=14)\n",
+    "post_label = AsDiscrete(to_onehot=True, num_classes=14)\n",
+    "post_pred = AsDiscrete(argmax=True, to_onehot=True, num_classes=14)\n",
     "dice_metric = DiceMetric(include_background=True, reduction=\"mean\", get_not_nans=False)\n",
     "global_step = 0\n",
     "dice_val_best = 0.0\n",
diff --git a/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb b/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb
index 8b3c04eedd..31737bbff9 100644
--- a/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb
+++ b/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb
@@ -415,8 +415,8 @@
     "        ).to(device)\n",
     "\n",
     "        self.loss_function = DiceCELoss(to_onehot_y=True, softmax=True)\n",
-    "        self.post_pred = AsDiscrete(argmax=True, to_onehot=True, n_classes=14)\n",
-    "        self.post_label = AsDiscrete(to_onehot=True, n_classes=14)\n",
+    "        self.post_pred = AsDiscrete(argmax=True, to_onehot=True, num_classes=14)\n",
+    "        self.post_label = AsDiscrete(to_onehot=True, num_classes=14)\n",
     "        self.dice_metric = DiceMetric(\n",
     "            include_background=False, reduction=\"mean\", get_not_nans=False\n",
     "        )\n",
diff --git a/acceleration/automatic_mixed_precision.ipynb b/acceleration/automatic_mixed_precision.ipynb
index 82c29c2c2e..e415ffe28e 100644
--- a/acceleration/automatic_mixed_precision.ipynb
+++ b/acceleration/automatic_mixed_precision.ipynb
@@ -352,8 +352,8 @@
     "    optimizer = torch.optim.Adam(model.parameters(), 1e-4)\n",
     "    scaler = torch.cuda.amp.GradScaler() if amp else None\n",
     "\n",
-    "    post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "    post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "\n",
     "    dice_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n",
     "\n",
diff --git a/acceleration/dataset_type_performance.ipynb b/acceleration/dataset_type_performance.ipynb
index 9a9203dd75..2c24728282 100644
--- a/acceleration/dataset_type_performance.ipynb
+++ b/acceleration/dataset_type_performance.ipynb
@@ -209,8 +209,8 @@
     "    loss_function = DiceLoss(to_onehot_y=True, softmax=True)\n",
     "    optimizer = torch.optim.Adam(model.parameters(), 1e-4)\n",
     "\n",
-    "    post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "    post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "\n",
     "    dice_metric = DiceMetric(include_background=True, reduction=\"mean\", get_not_nans=False)\n",
     "\n",
diff --git a/acceleration/fast_training_tutorial.ipynb b/acceleration/fast_training_tutorial.ipynb
index 0a1f6a83e8..b8504f43bc 100644
--- a/acceleration/fast_training_tutorial.ipynb
+++ b/acceleration/fast_training_tutorial.ipynb
@@ -394,8 +394,8 @@
     "        norm=Norm.BATCH,\n",
     "    ).to(device)\n",
     "\n",
-    "    post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "    post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "    post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "\n",
     "    dice_metric = DiceMetric(include_background=True, reduction=\"mean\", get_not_nans=False)\n",
     "\n",
diff --git a/federated_learning/nvflare/nvflare_example/spleen_example/custom/train_configer.py b/federated_learning/nvflare/nvflare_example/spleen_example/custom/train_configer.py
index a5028ba487..1a50ec9af6 100644
--- a/federated_learning/nvflare/nvflare_example/spleen_example/custom/train_configer.py
+++ b/federated_learning/nvflare/nvflare_example/spleen_example/custom/train_configer.py
@@ -226,7 +226,7 @@ def configure(self):
                     keys=["pred", "label"],
                     argmax=[True, False],
                     to_onehot=True,
-                    n_classes=2,
+                    num_classes=2,
                 ),
             ]
         )
diff --git a/federated_learning/nvflare/nvflare_example_docker/spleen_example/custom/train_configer.py b/federated_learning/nvflare/nvflare_example_docker/spleen_example/custom/train_configer.py
index a5028ba487..1a50ec9af6 100644
--- a/federated_learning/nvflare/nvflare_example_docker/spleen_example/custom/train_configer.py
+++ b/federated_learning/nvflare/nvflare_example_docker/spleen_example/custom/train_configer.py
@@ -226,7 +226,7 @@ def configure(self):
                     keys=["pred", "label"],
                     argmax=[True, False],
                     to_onehot=True,
-                    n_classes=2,
+                    num_classes=2,
                 ),
             ]
         )
diff --git a/modules/dynunet_pipeline/evaluator.py b/modules/dynunet_pipeline/evaluator.py
index 8dd35c3f49..fe9f5be0eb 100644
--- a/modules/dynunet_pipeline/evaluator.py
+++ b/modules/dynunet_pipeline/evaluator.py
@@ -26,7 +26,7 @@ class DynUNetEvaluator(SupervisedEvaluator):
         val_data_loader: Ignite engine use data_loader to run, must be
             torch.DataLoader.
         network: use the network to run model forward.
-        n_classes: the number of classes (output channels) for the task.
+        num_classes: the number of classes (output channels) for the task.
         epoch_length: number of iterations for one epoch, default to
             `len(val_data_loader)`.
         non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
@@ -54,7 +54,7 @@ def __init__(
         device: torch.device,
         val_data_loader: DataLoader,
         network: torch.nn.Module,
-        n_classes: Union[str, int],
+        num_classes: Union[str, int],
         epoch_length: Optional[int] = None,
         non_blocking: bool = False,
         prepare_batch: Callable = default_prepare_batch,
@@ -83,11 +83,11 @@ def __init__(
             amp=amp,
         )
-        if not isinstance(n_classes, int):
-            n_classes = int(n_classes)
-        self.n_classes = n_classes
-        self.post_pred = AsDiscrete(argmax=True, to_onehot=True, n_classes=n_classes)
-        self.post_label = AsDiscrete(to_onehot=True, n_classes=n_classes)
+        if not isinstance(num_classes, int):
+            num_classes = int(num_classes)
+        self.num_classes = num_classes
+        self.post_pred = AsDiscrete(argmax=True, to_onehot=True, num_classes=num_classes)
+        self.post_label = AsDiscrete(to_onehot=True, num_classes=num_classes)
         self.tta_val = tta_val
 
     def _iteration(
         self, engine: Engine, batchdata: Dict[str, Any]
@@ -159,13 +159,13 @@ def _compute_pred():
         if resample_flag:
             # convert the prediction back to the original (after cropped) shape
             predictions = recovery_prediction(
-                predictions.numpy(), [self.n_classes, *crop_shape], anisotrophy_flag
+                predictions.numpy(), [self.num_classes, *crop_shape], anisotrophy_flag
             )
             predictions = torch.tensor(predictions)
 
         # put iteration outputs into engine.state
         engine.state.output = {Keys.IMAGE: inputs, Keys.LABEL: targets.unsqueeze(0)}
-        engine.state.output[Keys.PRED] = torch.zeros([1, self.n_classes, *original_shape])
+        engine.state.output[Keys.PRED] = torch.zeros([1, self.num_classes, *original_shape])
         # pad the prediction back to the original shape
         box_start, box_end = batchdata["bbox"][0]
         h_start, w_start, d_start = box_start
diff --git a/modules/dynunet_pipeline/inference.py b/modules/dynunet_pipeline/inference.py
index e49cd1b952..4b4730f64e 100644
--- a/modules/dynunet_pipeline/inference.py
+++ b/modules/dynunet_pipeline/inference.py
@@ -55,7 +55,7 @@ def inference(args):
         val_data_loader=test_loader,
         network=net,
         output_dir=infer_output_dir,
-        n_classes=len(properties["labels"]),
+        num_classes=len(properties["labels"]),
         inferer=SlidingWindowInferer(
             roi_size=patch_size[task_id],
             sw_batch_size=sw_batch_size,
diff --git a/modules/dynunet_pipeline/inferrer.py b/modules/dynunet_pipeline/inferrer.py
index 21d4ae208e..15bb0a90b9 100644
--- a/modules/dynunet_pipeline/inferrer.py
+++ b/modules/dynunet_pipeline/inferrer.py
@@ -29,7 +29,7 @@ class DynUNetInferrer(SupervisedEvaluator):
             torch.DataLoader.
         network: use the network to run model forward.
         output_dir: the path to save inferred outputs.
-        n_classes: the number of classes (output channels) for the task.
+        num_classes: the number of classes (output channels) for the task.
         epoch_length: number of iterations for one epoch, default to
             `len(val_data_loader)`.
         non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously
@@ -58,7 +58,7 @@ def __init__(
         val_data_loader: DataLoader,
         network: torch.nn.Module,
         output_dir: str,
-        n_classes: Union[str, int],
+        num_classes: Union[str, int],
         epoch_length: Optional[int] = None,
         non_blocking: bool = False,
         prepare_batch: Callable = default_prepare_batch,
@@ -87,12 +87,12 @@ def __init__(
             amp=amp,
         )
 
-        if not isinstance(n_classes, int):
-            n_classes = int(n_classes)
-        self.post_pred = AsDiscrete(argmax=True, to_onehot=True, n_classes=n_classes)
+        if not isinstance(num_classes, int):
+            num_classes = int(num_classes)
+        self.post_pred = AsDiscrete(argmax=True, to_onehot=True, num_classes=num_classes)
         self.output_dir = output_dir
         self.tta_val = tta_val
-        self.n_classes = n_classes
+        self.num_classes = num_classes
 
     def _iteration(
         self, engine: Engine, batchdata: Dict[str, Any]
@@ -159,7 +159,7 @@ def _compute_pred():
         if resample_flag:
             # convert the prediction back to the original (after cropped) shape
             predictions = recovery_prediction(
-                predictions.numpy(), [self.n_classes, *crop_shape], anisotrophy_flag
+                predictions.numpy(), [self.num_classes, *crop_shape], anisotrophy_flag
             )
         else:
             predictions = predictions.numpy()
diff --git a/modules/dynunet_pipeline/train.py b/modules/dynunet_pipeline/train.py
index e0c15f42c9..c1bc8e443c 100644
--- a/modules/dynunet_pipeline/train.py
+++ b/modules/dynunet_pipeline/train.py
@@ -56,7 +56,7 @@ def validation(args):
             module=net, device_ids=[device], find_unused_parameters=True
         )
 
-    n_classes = len(properties["labels"])
+    num_classes = len(properties["labels"])
 
     net.eval()
 
@@ -64,7 +64,7 @@ def validation(args):
         device=device,
         val_data_loader=val_loader,
         network=net,
-        n_classes=n_classes,
+        num_classes=num_classes,
         inferer=SlidingWindowInferer(
             roi_size=patch_size[task_id],
             sw_batch_size=sw_batch_size,
@@ -87,8 +87,8 @@ def validation(args):
     if local_rank == 0:
         print(evaluator.state.metrics)
         results = evaluator.state.metric_details["val_mean_dice"]
-        if n_classes > 2:
-            for i in range(n_classes - 1):
+        if num_classes > 2:
+            for i in range(num_classes - 1):
                 print(
                     "mean dice for label {} is {}".format(i + 1, results[:, i].mean())
                 )
@@ -166,7 +166,7 @@ def train(args):
         device=device,
         val_data_loader=val_loader,
         network=net,
-        n_classes=len(properties["labels"]),
+        num_classes=len(properties["labels"]),
         inferer=SlidingWindowInferer(
             roi_size=patch_size[task_id],
             sw_batch_size=sw_batch_size,
diff --git a/modules/interpretability/covid_classification.ipynb b/modules/interpretability/covid_classification.ipynb
index 0a419ccef8..74d7767959 100644
--- a/modules/interpretability/covid_classification.ipynb
+++ b/modules/interpretability/covid_classification.ipynb
@@ -243,7 +243,7 @@
     "])\n",
     "\n",
     "y_pred_trans = Compose([EnsureType(), Activations(softmax=True)])\n",
-    "y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=num_class)])\n",
+    "y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=num_class)])\n",
     "\n",
     "all_files = glob(os.path.join(train_dir, \"*.png\"))\n",
     "random.shuffle(all_files)\n",
diff --git a/modules/learning_rate.ipynb b/modules/learning_rate.ipynb
index 81510c1b0b..f4e290abd4 100644
--- a/modules/learning_rate.ipynb
+++ b/modules/learning_rate.ipynb
@@ -253,7 +253,7 @@
     "num_classes = train_ds.get_num_classes()\n",
     "\n",
     "y_pred_trans = Compose([EnsureType(), Activations(softmax=True)])\n",
-    "y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=num_classes)])"
+    "y_trans = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=num_classes)])"
    ]
   },
   {
diff --git a/modules/postprocessing_transforms.ipynb b/modules/postprocessing_transforms.ipynb
index ccd80327a1..98d3982897 100644
--- a/modules/postprocessing_transforms.ipynb
+++ b/modules/postprocessing_transforms.ipynb
@@ -392,8 +392,8 @@
     "epoch_loss_values = []\n",
     "metric_values = []\n",
     "\n",
-    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "\n",
     "for epoch in range(max_epochs):\n",
     "    print(\"-\" * 10)\n",
diff --git a/modules/public_datasets.ipynb b/modules/public_datasets.ipynb
index 219e204e3a..1ada6cbb46 100644
--- a/modules/public_datasets.ipynb
+++ b/modules/public_datasets.ipynb
@@ -1485,7 +1485,7 @@
     "    inferer=SimpleInferer(),\n",
     "    postprocessing=AsDiscreted(\n",
     "        keys=[\"pred\", \"label\"], argmax=(True, False),\n",
-    "        to_onehot=True, n_classes=3,\n",
+    "        to_onehot=True, num_classes=3,\n",
     "    ),\n",
     "    key_train_metric={\n",
     "        \"train_meandice\": MeanDice(\n",
diff --git a/modules/transfer_mmar.ipynb b/modules/transfer_mmar.ipynb
index b27a7ff1ba..3d058ffcc5 100644
--- a/modules/transfer_mmar.ipynb
+++ b/modules/transfer_mmar.ipynb
@@ -642,8 +642,8 @@
     "best_metric_epoch = -1\n",
     "epoch_loss_values = []\n",
     "metric_values = []\n",
-    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, n_classes=2)])\n",
-    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, n_classes=2)])\n",
+    "post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=2)])\n",
+    "post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=2)])\n",
     "dice_metric = DiceMetric(include_background=False, reduction=\"mean\", get_not_nans=False)\n",
     "\n",
     "for epoch in range(max_epochs):\n",
diff --git a/pathology/tumor_detection/ignite/camelyon_train_evaluate.py b/pathology/tumor_detection/ignite/camelyon_train_evaluate.py
index e1f0dd829d..136d3a0bb6 100644
--- a/pathology/tumor_detection/ignite/camelyon_train_evaluate.py
+++ b/pathology/tumor_detection/ignite/camelyon_train_evaluate.py
@@ -164,7 +164,7 @@ def train(cfg):
     # --------------------------------------------------------------------------
    # __________________________________________________________________________
     # initialize model
-    model = TorchVisionFCModel("resnet18", n_classes=1, use_conv=True, pretrained=cfg["pretrain"])
+    model = TorchVisionFCModel("resnet18", num_classes=1, use_conv=True, pretrained=cfg["pretrain"])
     model = model.to(device)
 
     # loss function
diff --git a/pathology/tumor_detection/ignite/camelyon_train_evaluate_nvtx_profiling.py b/pathology/tumor_detection/ignite/camelyon_train_evaluate_nvtx_profiling.py
index fff16f24f5..82efcce25d 100644
--- a/pathology/tumor_detection/ignite/camelyon_train_evaluate_nvtx_profiling.py
+++ b/pathology/tumor_detection/ignite/camelyon_train_evaluate_nvtx_profiling.py
@@ -175,7 +175,7 @@ def train(cfg):
     # --------------------------------------------------------------------------
     # __________________________________________________________________________
     # initialize model
-    model = TorchVisionFCModel("resnet18", n_classes=1, use_conv=True, pretrained=cfg["pretrain"])
+    model = TorchVisionFCModel("resnet18", num_classes=1, use_conv=True, pretrained=cfg["pretrain"])
     model = Range("ResNet18")(model)
     model = model.to(device)
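
A minimal usage sketch of the renamed keyword, added for reviewers and not part of the patch itself. It assumes a MONAI release in which `AsDiscrete` accepts `num_classes` (e.g. the 0.7.x line this rename targets); the tensor shapes are illustrative only.

import torch
from monai.transforms import AsDiscrete, Compose, EnsureType

num_class = 2  # binary segmentation, as in the spleen tutorials above

# the patched post-processing chains: `num_classes` replaces the old `n_classes`
post_pred = Compose([EnsureType(), AsDiscrete(argmax=True, to_onehot=True, num_classes=num_class)])
post_label = Compose([EnsureType(), AsDiscrete(to_onehot=True, num_classes=num_class)])

logits = torch.randn(num_class, 8, 8, 8)           # channel-first network output
label = torch.randint(0, num_class, (1, 8, 8, 8))  # single-channel ground truth

pred_onehot = post_pred(logits)   # argmax over the channel dim, then one-hot -> (2, 8, 8, 8)
label_onehot = post_label(label)  # one-hot encode the label                  -> (2, 8, 8, 8)
assert pred_onehot.shape == label_onehot.shape == (num_class, 8, 8, 8)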