diff --git a/3d_segmentation/spleen_segmentation_3d.ipynb b/3d_segmentation/spleen_segmentation_3d.ipynb index d070761796..c931724682 100644 --- a/3d_segmentation/spleen_segmentation_3d.ipynb +++ b/3d_segmentation/spleen_segmentation_3d.ipynb @@ -284,7 +284,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", " RandCropByPosNegLabeld(\n", @@ -318,7 +318,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", " ]\n", @@ -690,7 +690,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")\n", "\n", @@ -784,7 +784,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")\n", "\n", diff --git a/3d_segmentation/spleen_segmentation_3d_lightning.ipynb b/3d_segmentation/spleen_segmentation_3d_lightning.ipynb index a755268821..d6459cfbbc 100644 --- a/3d_segmentation/spleen_segmentation_3d_lightning.ipynb +++ b/3d_segmentation/spleen_segmentation_3d_lightning.ipynb @@ -278,7 +278,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " 
CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " # randomly crop out patch samples from\n", " # big image based on pos / neg ratio\n", " # the image centers of negative samples\n", @@ -321,7 +321,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", " )\n", "\n", diff --git a/3d_segmentation/spleen_segmentation_3d_visualization_basic.ipynb b/3d_segmentation/spleen_segmentation_3d_visualization_basic.ipynb index 18ec833dae..e1b1749d35 100644 --- a/3d_segmentation/spleen_segmentation_3d_visualization_basic.ipynb +++ b/3d_segmentation/spleen_segmentation_3d_visualization_basic.ipynb @@ -317,7 +317,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", " RandCropByPosNegLabeld(\n", @@ -351,7 +351,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", " ]\n", @@ -782,7 +782,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")\n", "\n", diff --git a/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb 
b/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb index 6bb7eaa0c7..1c51c2f398 100644 --- a/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb +++ b/3d_segmentation/swin_unetr_brats21_segmentation_3d.ipynb @@ -326,6 +326,7 @@ " keys=[\"image\", \"label\"],\n", " source_key=\"image\",\n", " k_divisible=[roi[0], roi[1], roi[2]],\n", + " allow_smaller=True,\n", " ),\n", " transforms.RandSpatialCropd(\n", " keys=[\"image\", \"label\"],\n", @@ -467,7 +468,6 @@ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "model = SwinUNETR(\n", - " img_size=roi,\n", " in_channels=4,\n", " out_channels=3,\n", " feature_size=48,\n", diff --git a/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb b/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb index 1dd0585255..4bd639db57 100644 --- a/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb +++ b/3d_segmentation/swin_unetr_btcv_segmentation_3d.ipynb @@ -243,7 +243,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(\n", " keys=[\"image\", \"label\"],\n", @@ -292,7 +292,7 @@ " [\n", " LoadImaged(keys=[\"image\", \"label\"], ensure_channel_first=True),\n", " ScaleIntensityRanged(keys=[\"image\"], a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(\n", " keys=[\"image\", \"label\"],\n", @@ -439,7 +439,6 @@ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "model = SwinUNETR(\n", - " img_size=(96, 96, 96),\n", " in_channels=1,\n", " out_channels=14,\n", 
" feature_size=48,\n", diff --git a/3d_segmentation/unetr_btcv_segmentation_3d.ipynb b/3d_segmentation/unetr_btcv_segmentation_3d.ipynb index 0ce403cb90..4739d650d5 100644 --- a/3d_segmentation/unetr_btcv_segmentation_3d.ipynb +++ b/3d_segmentation/unetr_btcv_segmentation_3d.ipynb @@ -228,7 +228,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"],\n", " label_key=\"label\",\n", @@ -277,7 +277,7 @@ " mode=(\"bilinear\", \"nearest\"),\n", " ),\n", " ScaleIntensityRanged(keys=[\"image\"], a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")" ] diff --git a/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb b/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb index 3d383bd78d..1fae50750a 100644 --- a/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb +++ b/3d_segmentation/unetr_btcv_segmentation_3d_lightning.ipynb @@ -466,7 +466,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(\n", " keys=[\"image\", \"label\"],\n", @@ -522,7 +522,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(\n", " keys=[\"image\", \"label\"],\n", diff --git 
a/acceleration/automatic_mixed_precision.ipynb b/acceleration/automatic_mixed_precision.ipynb index 5d8c0755c4..dbeddcc7a8 100644 --- a/acceleration/automatic_mixed_precision.ipynb +++ b/acceleration/automatic_mixed_precision.ipynb @@ -199,7 +199,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " # pre-compute foreground and background indexes\n", " # and cache them to accelerate training\n", " FgBgToIndicesd(\n", @@ -241,7 +241,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", " )\n", " return train_transforms, val_transforms" diff --git a/acceleration/dataset_type_performance.ipynb b/acceleration/dataset_type_performance.ipynb index 1a5857f0d0..f8c2911c59 100644 --- a/acceleration/dataset_type_performance.ipynb +++ b/acceleration/dataset_type_performance.ipynb @@ -380,7 +380,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " # randomly crop out patch samples from big\n", " # image based on pos / neg ratio\n", " # the image centers of negative samples\n", @@ -420,7 +420,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", " )\n", " return train_transforms, val_transforms" diff --git a/acceleration/fast_training_tutorial.ipynb b/acceleration/fast_training_tutorial.ipynb index f394b2f5cf..85b197231b 100644 --- a/acceleration/fast_training_tutorial.ipynb +++ 
b/acceleration/fast_training_tutorial.ipynb @@ -311,7 +311,7 @@ " clip=True,\n", " ),\n", " ),\n", - " range_func(\"CropForeground\", CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\")),\n", + " range_func(\"CropForeground\", CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True)),\n", " # pre-compute foreground and background indexes\n", " # and cache them to accelerate training\n", " range_func(\n", @@ -368,7 +368,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", " if fast:\n", " # convert the data to Tensor without meta, move to GPU and cache to avoid CPU -> GPU sync in every epoch\n", diff --git a/auto3dseg/docs/algorithm_generation.md b/auto3dseg/docs/algorithm_generation.md index dfe54d01d7..24ed6ac973 100644 --- a/auto3dseg/docs/algorithm_generation.md +++ b/auto3dseg/docs/algorithm_generation.md @@ -143,7 +143,12 @@ class DintsAlgo(BundleAlgo): "b_max": 1.0, "clip": True, }, - {"_target_": "CropForegroundd", "keys": ["@image_key", "@label_key"], "source_key": "@image_key"}, + { + "_target_": "CropForegroundd", + "keys": ["@image_key", "@label_key"], + "source_key": "@image_key", + "allow_smaller": True, + }, ], } diff --git a/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py b/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py index 55b4c27aa4..352b89667d 100644 --- a/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py +++ b/auto3dseg/tasks/hecktor22/hecktor_crop_neck_region.py @@ -31,9 +31,16 @@ def __init__( source_key="image", box_size=[200, 200, 310], allow_missing_keys=True, + allow_smaller=True, **kwargs, ) -> None: - super().__init__(keys=keys, source_key=source_key, allow_missing_keys=allow_missing_keys, **kwargs) + super().__init__( + keys=keys, + source_key=source_key, + allow_missing_keys=allow_missing_keys, 
allow_smaller=allow_smaller, + **kwargs, + ) self.box_size = box_size def __call__(self, data, **kwargs): diff --git a/bundle/python_bundle_workflow/scripts/inference.py b/bundle/python_bundle_workflow/scripts/inference.py index a98b47bb64..8e090e7580 100644 --- a/bundle/python_bundle_workflow/scripts/inference.py +++ b/bundle/python_bundle_workflow/scripts/inference.py @@ -48,7 +48,7 @@ class InferenceWorkflow(BundleWorkflow): """ def __init__(self, dataset_dir: str = "./infer"): - super().__init__(workflow="inference") + super().__init__(workflow_type="inference") print_config() # set root log level to INFO and init a evaluation logger, will be used in `StatsHandler` logging.basicConfig(stream=sys.stdout, level=logging.INFO) diff --git a/bundle/python_bundle_workflow/scripts/train.py b/bundle/python_bundle_workflow/scripts/train.py index 29a53b46a1..4a195df4f5 100644 --- a/bundle/python_bundle_workflow/scripts/train.py +++ b/bundle/python_bundle_workflow/scripts/train.py @@ -67,7 +67,7 @@ class TrainWorkflow(BundleWorkflow): """ def __init__(self, dataset_dir: str = "./train"): - super().__init__(workflow="train") + super().__init__(workflow_type="train") print_config() # set root log level to INFO and init a train logger, will be used in `StatsHandler` logging.basicConfig(stream=sys.stdout, level=logging.INFO) diff --git a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb index f8b4afac6d..2360602142 100644 --- a/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb +++ b/bundle/pythonic_usage_guidance/pythonic_bundle_access.ipynb @@ -397,9 +397,7 @@ "metadata": {}, "outputs": [], "source": [ - "# Here we specify `return_state_dict=False` to return an instantiated model only for compatibility, will remove after MONAI v1.5.\n", - "# directly get an instantiated network that loaded the weights.\n", - "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, 
source=\"monaihosting\", return_state_dict=False)\n", + "model = load(name=\"brats_mri_segmentation\", bundle_dir=root_dir, source=\"monaihosting\")\n", "\n", "# directly update the parameters for the model from the bundle.\n", "model = load(\n", @@ -408,7 +406,6 @@ " source=\"monaihosting\",\n", " in_channels=3,\n", " out_channels=1,\n", - " return_state_dict=False,\n", ")\n", "\n", "# using `exclude_vars` to filter loading weights.\n", @@ -417,7 +414,6 @@ " bundle_dir=root_dir,\n", " source=\"monaihosting\",\n", " copy_model_args={\"exclude_vars\": \"convInit|conv_final\"},\n", - " return_state_dict=False,\n", ")\n", "\n", "# pass model and return an instantiated network that loaded the weights.\n", diff --git a/deepgrow/ignite/train.py b/deepgrow/ignite/train.py index df0917f8bd..e1f6ad5cba 100644 --- a/deepgrow/ignite/train.py +++ b/deepgrow/ignite/train.py @@ -92,7 +92,7 @@ def get_pre_transforms(roi_size, model_size, dimensions): t = [ LoadImaged(keys=("image", "label")), EnsureChannelFirstd(keys=("image", "label"), channel_dim="no_channel"), - SpatialCropForegroundd(keys=("image", "label"), source_key="label", spatial_size=roi_size), + SpatialCropForegroundd(keys=("image", "label"), source_key="label", spatial_size=roi_size, allow_smaller=True), Resized(keys=("image", "label"), spatial_size=model_size, mode=("area", "nearest")), NormalizeIntensityd(keys="image", subtrahend=208.0, divisor=388.0), ] diff --git a/deployment/Triton/models/monai_covid/1/model.py b/deployment/Triton/models/monai_covid/1/model.py index 34595f1bec..3e8f6442c3 100644 --- a/deployment/Triton/models/monai_covid/1/model.py +++ b/deployment/Triton/models/monai_covid/1/model.py @@ -112,7 +112,7 @@ def initialize(self, args): LoadImage(reader="NibabelReader", image_only=True, dtype=np.float32), EnsureChannelFirst(channel_dim="no_channel"), ScaleIntensityRange(a_min=-1000, a_max=500, b_min=0.0, b_max=1.0, clip=True), - CropForeground(margin=5), + CropForeground(margin=5, 
allow_smaller=True), Resize([192, 192, 64], mode="area"), EnsureChannelFirst(channel_dim="no_channel"), ToTensor(), diff --git a/experiment_management/spleen_segmentation_aim.ipynb b/experiment_management/spleen_segmentation_aim.ipynb index 8d301834d8..f5da6a9e93 100644 --- a/experiment_management/spleen_segmentation_aim.ipynb +++ b/experiment_management/spleen_segmentation_aim.ipynb @@ -251,7 +251,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"],\n", " label_key=\"label\",\n", @@ -285,7 +285,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")" ] diff --git a/experiment_management/spleen_segmentation_mlflow.ipynb b/experiment_management/spleen_segmentation_mlflow.ipynb index b7bbc0705a..0f11b2a004 100644 --- a/experiment_management/spleen_segmentation_mlflow.ipynb +++ b/experiment_management/spleen_segmentation_mlflow.ipynb @@ -249,7 +249,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"],\n", " label_key=\"label\",\n", @@ -283,7 +283,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")" ] diff --git a/model_zoo/transfer_learning_with_bundle/evaluate.py b/model_zoo/transfer_learning_with_bundle/evaluate.py index 6de0b67744..0ad0c879c7 100644 --- 
a/model_zoo/transfer_learning_with_bundle/evaluate.py +++ b/model_zoo/transfer_learning_with_bundle/evaluate.py @@ -37,7 +37,6 @@ AsDiscrete, AsDiscreted, Compose, - CropForegroundd, EnsureChannelFirstd, Invertd, LoadImaged, diff --git a/model_zoo/transfer_learning_with_bundle/train.py b/model_zoo/transfer_learning_with_bundle/train.py index 77f4c8da10..9d87dcb6d6 100644 --- a/model_zoo/transfer_learning_with_bundle/train.py +++ b/model_zoo/transfer_learning_with_bundle/train.py @@ -35,15 +35,11 @@ from monai.transforms import ( Activations, AsDiscrete, - AsDiscreted, Compose, - CropForegroundd, EnsureChannelFirstd, - Invertd, LoadImaged, Orientationd, RandCropByPosNegLabeld, - SaveImaged, ScaleIntensityRanged, Spacingd, ) diff --git a/modules/dynunet_pipeline/transforms.py b/modules/dynunet_pipeline/transforms.py index acb86396ca..a02d82b5c8 100644 --- a/modules/dynunet_pipeline/transforms.py +++ b/modules/dynunet_pipeline/transforms.py @@ -277,7 +277,7 @@ def __init__( self.mean = normalize_values[0] self.std = normalize_values[1] self.training = False - self.crop_foreg = CropForegroundd(keys=["image", "label"], source_key="image") + self.crop_foreg = CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True) self.normalize_intensity = NormalizeIntensity(nonzero=True, channel_wise=True) if model_mode in ["train"]: self.training = True @@ -310,7 +310,7 @@ def __call__(self, data): image, label = cropped_data["image"], cropped_data["label"] else: d["original_shape"] = np.array(image.shape[1:]) - box_start, box_end = generate_spatial_bounding_box(image) + box_start, box_end = generate_spatial_bounding_box(image, allow_smaller=True) image = SpatialCrop(roi_start=box_start, roi_end=box_end)(image) d["bbox"] = np.vstack([box_start, box_end]) d["crop_shape"] = np.array(image.shape[1:]) diff --git a/modules/integrate_3rd_party_transforms.ipynb b/modules/integrate_3rd_party_transforms.ipynb index 04429e1888..191efb283a 100644 --- 
a/modules/integrate_3rd_party_transforms.ipynb +++ b/modules/integrate_3rd_party_transforms.ipynb @@ -210,7 +210,7 @@ " Orientationd(keys=[\"image\", \"label\"], axcodes=\"RAS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", " ScaleIntensityRanged(keys=[\"image\"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " EnsureTyped(keys=[\"image\", \"label\"], data_type=\"numpy\"),\n", "]" ] diff --git a/modules/inverse_transforms_and_test_time_augmentations.ipynb b/modules/inverse_transforms_and_test_time_augmentations.ipynb index 718767dd8a..d6ebd5e5e6 100644 --- a/modules/inverse_transforms_and_test_time_augmentations.ipynb +++ b/modules/inverse_transforms_and_test_time_augmentations.ipynb @@ -293,7 +293,7 @@ " padding_mode=\"zeros\",\n", " mode=(\"bilinear\", \"nearest\"),\n", " ),\n", - " CropForegroundd(keys, source_key=\"image\"),\n", + " CropForegroundd(keys, source_key=\"image\", allow_smaller=True),\n", " DivisiblePadd(keys, 16),\n", " ScaleIntensityd(\"image\"),\n", " ]\n", diff --git a/modules/postprocessing_transforms.ipynb b/modules/postprocessing_transforms.ipynb index 2fe2e4794a..f3db627edc 100644 --- a/modules/postprocessing_transforms.ipynb +++ b/modules/postprocessing_transforms.ipynb @@ -266,7 +266,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " # randomly crop out patch samples from big image\n", " # based on pos / neg ratio. 
the image centers\n", " # of negative samples must be in valid image area\n", @@ -297,7 +297,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " EnsureTyped(keys=[\"image\", \"label\"]),\n", " ]\n", ")" diff --git a/modules/transform_visualization.ipynb b/modules/transform_visualization.ipynb index b6e96f8444..90e0632494 100644 --- a/modules/transform_visualization.ipynb +++ b/modules/transform_visualization.ipynb @@ -207,7 +207,7 @@ " Orientationd(keys=[\"image\", \"label\"], axcodes=\"PLS\"),\n", " Spacingd(keys=[\"image\", \"label\"], pixdim=(1.5, 1.5, 2.0), mode=(\"bilinear\", \"nearest\")),\n", " ScaleIntensityRanged(keys=[\"image\"], a_min=-57, a_max=164, b_min=0.0, b_max=1.0, clip=True),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")" ] diff --git a/modules/transforms_metatensor.ipynb b/modules/transforms_metatensor.ipynb index 64c5b2209a..2358ec3d18 100644 --- a/modules/transforms_metatensor.ipynb +++ b/modules/transforms_metatensor.ipynb @@ -328,7 +328,7 @@ " mt.LoadImageD(keys, image_only=True),\n", " mt.EnsureChannelFirstD(keys),\n", " mt.OrientationD(keys, \"RAS\"),\n", - " mt.CropForegroundD(keys, source_key=\"seg\"),\n", + " mt.CropForegroundD(keys, source_key=\"seg\", allow_smaller=True),\n", " mt.ScaleIntensityRangePercentilesD(\"img\", 5, 95, b_min=0.01, b_max=1.0, clip=True),\n", " mt.SpacingD(keys, pixdim=[1.2, 1.5, 1.5], mode=(\"bilinear\", \"nearest\")),\n", " mt.DataStatsD(keys), # this transform shows input data properties for debugging\n", @@ -780,7 +780,7 @@ " mt.LoadImageD(keys, image_only=True),\n", " mt.EnsureChannelFirstD(keys),\n", " mt.OrientationD(keys, \"RAS\"),\n", - " mt.CropForegroundD(keys, source_key=\"seg\"),\n", + " 
mt.CropForegroundD(keys, source_key=\"seg\", allow_smaller=True),\n", " mt.ScaleIntensityRangePercentilesD(\"img\", 5, 95, b_min=0.01, b_max=1.0, clip=True),\n", " mt.SpacingD(keys, pixdim=[1.2, 1.5, 1.5], mode=(\"bilinear\", \"nearest\")),\n", " mt.DivisiblePadD(keys, 7, mode=\"replicate\"),\n", diff --git a/modules/transforms_update_meta_data.ipynb b/modules/transforms_update_meta_data.ipynb index 91c7a2d417..1090bd1515 100644 --- a/modules/transforms_update_meta_data.ipynb +++ b/modules/transforms_update_meta_data.ipynb @@ -227,7 +227,7 @@ " mt.LoadImaged(keys),\n", " mt.EnsureChannelFirstd(keys),\n", " mt.Orientationd(keys, \"RAI\"),\n", - " mt.CropForegroundd(keys, source_key=\"image\"),\n", + " mt.CropForegroundd(keys, source_key=\"image\", allow_smaller=True),\n", " mt.Spacingd(keys, pixdim=[0.5, 0.5, 1], mode=(\"bilinear\", \"nearest\")),\n", " mt.ScaleIntensityd(\"image\"),\n", " mt.DivisiblePadd(keys, 8),\n", diff --git a/performance_profiling/radiology/train_base_nvtx.py b/performance_profiling/radiology/train_base_nvtx.py index b2f6373e69..ff54fbb7ff 100644 --- a/performance_profiling/radiology/train_base_nvtx.py +++ b/performance_profiling/radiology/train_base_nvtx.py @@ -92,7 +92,7 @@ clip=True, ) ), - Range()(CropForegroundd(keys=["image", "label"], source_key="image")), + Range()(CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True)), Range("RandCrop")( RandCropByPosNegLabeld( keys=["image", "label"], @@ -127,7 +127,7 @@ b_max=1.0, clip=True, ), - CropForegroundd(keys=["image", "label"], source_key="image"), + CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True), EnsureTyped(keys=["image", "label"]), ] ) diff --git a/performance_profiling/radiology/train_fast_nvtx.py b/performance_profiling/radiology/train_fast_nvtx.py index 4013a36110..029e192c56 100644 --- a/performance_profiling/radiology/train_fast_nvtx.py +++ b/performance_profiling/radiology/train_fast_nvtx.py @@ -96,7 +96,7 @@ clip=True, 
) ), - Range()(CropForegroundd(keys=["image", "label"], source_key="image")), + Range()(CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True)), # pre-compute foreground and background indexes # and cache them to accelerate training Range("Indexing")( @@ -142,7 +142,7 @@ b_max=1.0, clip=True, ), - CropForegroundd(keys=["image", "label"], source_key="image"), + CropForegroundd(keys=["image", "label"], source_key="image", allow_smaller=True), EnsureTyped(keys=["image", "label"]), ToDeviced(keys=["image", "label"], device="cuda:0"), ] diff --git a/self_supervised_pretraining/swinunetr_pretrained/swinunetr_finetune.ipynb b/self_supervised_pretraining/swinunetr_pretrained/swinunetr_finetune.ipynb index d59b140778..24dcb3f877 100644 --- a/self_supervised_pretraining/swinunetr_pretrained/swinunetr_finetune.ipynb +++ b/self_supervised_pretraining/swinunetr_pretrained/swinunetr_finetune.ipynb @@ -203,7 +203,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"],\n", " label_key=\"label\",\n", @@ -253,7 +253,7 @@ " mode=(\"bilinear\", \"nearest\"),\n", " ),\n", " ScaleIntensityRanged(keys=[\"image\"], a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ToTensord(keys=[\"image\", \"label\"]),\n", " ]\n", ")" @@ -318,7 +318,6 @@ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "\n", "model = SwinUNETR(\n", - " img_size=(96, 96, 96),\n", " in_channels=1,\n", " out_channels=14,\n", " feature_size=48,\n", diff --git a/self_supervised_pretraining/vit_unetr_ssl/multi_gpu/mgpu_ssl_train.py 
b/self_supervised_pretraining/vit_unetr_ssl/multi_gpu/mgpu_ssl_train.py index 3364f04925..592695a06a 100644 --- a/self_supervised_pretraining/vit_unetr_ssl/multi_gpu/mgpu_ssl_train.py +++ b/self_supervised_pretraining/vit_unetr_ssl/multi_gpu/mgpu_ssl_train.py @@ -97,7 +97,7 @@ def main(args): b_max=1.0, clip=True, ), - CropForegroundd(keys=["image"], source_key="image"), + CropForegroundd(keys=["image"], source_key="image", allow_smaller=True), SpatialPadd(keys=["image"], spatial_size=(96, 96, 96)), RandSpatialCropSamplesd(keys=["image"], roi_size=(96, 96, 96), random_size=False, num_samples=2), CopyItemsd(keys=["image"], times=2, names=["gt_image", "image_2"], allow_missing_keys=False), diff --git a/self_supervised_pretraining/vit_unetr_ssl/ssl_finetune.ipynb b/self_supervised_pretraining/vit_unetr_ssl/ssl_finetune.ipynb index 7f5a4121c6..c28b5a34c7 100644 --- a/self_supervised_pretraining/vit_unetr_ssl/ssl_finetune.ipynb +++ b/self_supervised_pretraining/vit_unetr_ssl/ssl_finetune.ipynb @@ -190,7 +190,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " RandCropByPosNegLabeld(\n", " keys=[\"image\", \"label\"],\n", " label_key=\"label\",\n", @@ -240,7 +240,7 @@ " mode=(\"bilinear\", \"nearest\"),\n", " ),\n", " ScaleIntensityRanged(keys=[\"image\"], a_min=-175, a_max=250, b_min=0.0, b_max=1.0, clip=True),\n", - " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\", \"label\"], source_key=\"image\", allow_smaller=True),\n", " ToTensord(keys=[\"image\", \"label\"]),\n", " ]\n", ")" diff --git a/self_supervised_pretraining/vit_unetr_ssl/ssl_train.ipynb b/self_supervised_pretraining/vit_unetr_ssl/ssl_train.ipynb index c730415b4c..df492ee5cd 100644 --- a/self_supervised_pretraining/vit_unetr_ssl/ssl_train.ipynb +++ 
b/self_supervised_pretraining/vit_unetr_ssl/ssl_train.ipynb @@ -191,7 +191,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\"], source_key=\"image\", allow_smaller=True),\n", " SpatialPadd(keys=[\"image\"], spatial_size=(96, 96, 96)),\n", " RandSpatialCropSamplesd(keys=[\"image\"], roi_size=(96, 96, 96), random_size=False, num_samples=2),\n", " CopyItemsd(keys=[\"image\"], times=2, names=[\"gt_image\", \"image_2\"], allow_missing_keys=False),\n", diff --git a/vista_3d/vista3d_spleen_finetune.ipynb b/vista_3d/vista3d_spleen_finetune.ipynb index 44ecec0a9f..a8564ce16a 100644 --- a/vista_3d/vista3d_spleen_finetune.ipynb +++ b/vista_3d/vista3d_spleen_finetune.ipynb @@ -768,7 +768,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\"], source_key=\"image\", allow_smaller=True),\n", " Relabeld(keys=\"label\", label_mappings=label_mappings, dtype=torch.uint8),\n", " ]\n", ")\n", @@ -884,7 +884,7 @@ " b_max=1.0,\n", " clip=True,\n", " ),\n", - " CropForegroundd(keys=[\"image\"], source_key=\"image\"),\n", + " CropForegroundd(keys=[\"image\"], source_key=\"image\", allow_smaller=True),\n", " ]\n", ")\n", "\n",