From 9cfac75528141dbe8f5136d81c4eec5238e672c7 Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Tue, 13 Dec 2022 13:20:06 +0000
Subject: [PATCH 1/4] fixes discussion #5678

Signed-off-by: Wenqi Li
---
 monai/data/image_reader.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/monai/data/image_reader.py b/monai/data/image_reader.py
index 91fa3b89d8..6919c10ffe 100644
--- a/monai/data/image_reader.py
+++ b/monai/data/image_reader.py
@@ -647,7 +647,7 @@ def _get_meta_dict(self, img) -> Dict:

         """
-        metadata = img.to_json_dict()
+        metadata = img.to_json_dict(suppress_invalid_tags=True)

         if self.prune_metadata:
             prune_metadata = {}

From 03e62819aa648ca76af18476741aca4850c9728d Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Tue, 13 Dec 2022 14:04:31 +0000
Subject: [PATCH 2/4] fixes wrong warn import

Signed-off-by: Wenqi Li
---
 monai/losses/contrastive.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/monai/losses/contrastive.py b/monai/losses/contrastive.py
index ad53269f82..e0ed4ceab0 100644
--- a/monai/losses/contrastive.py
+++ b/monai/losses/contrastive.py
@@ -9,7 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-from distutils.log import warn
+from warnings import warn

 import torch
 from torch.nn import functional as F

From 9e8454572cef2aa8d255223f4b91facedc194082 Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Tue, 13 Dec 2022 14:45:13 +0000
Subject: [PATCH 3/4] itk 5.3 no py constraint

Signed-off-by: Wenqi Li
---
 requirements-dev.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements-dev.txt b/requirements-dev.txt
index 69f24c0dd1..84655dc828 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -3,7 +3,7 @@
 pytorch-ignite==0.4.10
 gdown>=4.4.0
 scipy
-itk>=5.2; python_version < "3.10"
+itk>=5.2
 nibabel
 pillow!=8.3.0  # https://github.com/python-pillow/Pillow/issues/5571
 tensorboard

From 813398c792c11d6646515a2e4a174d46578ed263 Mon Sep 17 00:00:00 2001
From: Wenqi Li
Date: Tue, 13 Dec 2022 15:56:18 +0000
Subject: [PATCH 4/4] fixes typos

Signed-off-by: Wenqi Li
---
 monai/apps/detection/networks/retinanet_detector.py | 2 +-
 monai/apps/nuclick/transforms.py | 12 ++++++------
 monai/apps/pathology/engines/utils.py | 2 +-
 monai/apps/reconstruction/transforms/array.py | 2 +-
 monai/apps/reconstruction/transforms/dictionary.py | 4 ++--
 monai/bundle/scripts.py | 10 ++++++----
 monai/bundle/utils.py | 2 +-
 monai/data/box_utils.py | 2 +-
 monai/data/dataset.py | 4 ++--
 monai/data/fft_utils.py | 4 ++--
 monai/data/image_writer.py | 2 +-
 monai/data/wsi_reader.py | 2 +-
 monai/engines/utils.py | 4 ++--
 monai/fl/client/monai_algo.py | 2 +-
 monai/handlers/mlflow_handler.py | 2 +-
 monai/handlers/panoptic_quality.py | 4 ++--
 monai/inferers/inferer.py | 2 +-
 monai/metrics/generalized_dice.py | 2 +-
 monai/metrics/panoptic_quality.py | 8 ++++----
 monai/metrics/utils.py | 2 +-
 monai/networks/blocks/encoder.py | 4 ++--
 monai/networks/nets/flexible_unet.py | 8 ++++----
 monai/networks/nets/segresnet_ds.py | 4 ++--
 monai/optimizers/lr_scheduler.py | 2 +-
 monai/utils/enums.py | 2 +-
 monai/utils/misc.py | 2 +-
 monai/visualize/occlusion_sensitivity.py | 2 +-
 tests/test_flexible_unet.py | 6 +++---
 28 files changed, 53 insertions(+), 51 deletions(-)

diff --git a/monai/apps/detection/networks/retinanet_detector.py b/monai/apps/detection/networks/retinanet_detector.py
index 4c6f165439..5064304cbb 100644
--- a/monai/apps/detection/networks/retinanet_detector.py
+++ b/monai/apps/detection/networks/retinanet_detector.py
@@ -604,7 +604,7 @@ def postprocess_detections(
     ) -> List[Dict[str, Tensor]]:
         """
         Postprocessing to generate detection result from classification logits and box regression.
-        Use self.box_selector to select the final outut boxes for each image.
+        Use self.box_selector to select the final output boxes for each image.

         Args:
             head_outputs_reshape: reshaped head_outputs. ``head_output_reshape[self.cls_key]`` is a Tensor
diff --git a/monai/apps/nuclick/transforms.py b/monai/apps/nuclick/transforms.py
index 73e4e7485e..f080961e4c 100644
--- a/monai/apps/nuclick/transforms.py
+++ b/monai/apps/nuclick/transforms.py
@@ -315,13 +315,13 @@ def __call__(self, data):
         mask = d[self.label] if isinstance(d[self.label], torch.Tensor) else torch.from_numpy(d[self.label])

         inc_sig = self.inclusion_map(mask[0], dtype=image.dtype)
-        inc_sig = self._apply_gaussion(inc_sig)
+        inc_sig = self._apply_gaussian(inc_sig)
         if self.add_exclusion_map:
             others = d[self.others] if isinstance(d[self.others], torch.Tensor) else torch.from_numpy(d[self.others])
             exc_sig = self.exclusion_map(
                 others[0], dtype=image.dtype, drop_rate=self.drop_rate, jitter_range=self.jitter_range
             )
-            exc_sig = self._apply_gaussion(exc_sig)
+            exc_sig = self._apply_gaussian(exc_sig)
             image = torch.cat((image, inc_sig[None], exc_sig[None]), dim=0)
         else:
             image = torch.cat((image, inc_sig[None]), dim=0)
@@ -329,7 +329,7 @@ def __call__(self, data):
         d[self.image] = image if isinstance(d[self.image], torch.Tensor) else convert_to_numpy(image)
         return d

-    def _apply_gaussion(self, t):
+    def _apply_gaussian(self, t):
         if not self.gaussian or torch.count_nonzero(t) == 0:
             return t
         x = GaussianFilter(spatial_dims=2, truncated=self.truncated, sigma=self.sigma)(t.unsqueeze(0).unsqueeze(0))
@@ -500,19 +500,19 @@ def get_patches_and_signals(self, img, click_map, bounding_boxes, cx, cy, x, y):
             this_click_map[cx[i], cy[i]] = 1

             nuc_points = this_click_map[x_start:x_end, y_start:y_end]
-            nuc_points = self._apply_gaussion(nuc_points)
+            nuc_points = self._apply_gaussian(nuc_points)

             if self.add_exclusion_map:
                 others_click_map = ((click_map - this_click_map) > 0).type(img.dtype)
                 other_points = others_click_map[x_start:x_end, y_start:y_end]
-                other_points = self._apply_gaussion(other_points)
+                other_points = self._apply_gaussian(other_points)
                 patches.append(torch.cat([patch, nuc_points[None], other_points[None]]))
             else:
                 patches.append(torch.cat([patch, nuc_points[None]]))

         return torch.stack(patches)

-    def _apply_gaussion(self, t):
+    def _apply_gaussian(self, t):
         if not self.gaussian or torch.count_nonzero(t) == 0:
             return t
         x = GaussianFilter(spatial_dims=2, truncated=self.truncated, sigma=self.sigma)(t.unsqueeze(0).unsqueeze(0))
diff --git a/monai/apps/pathology/engines/utils.py b/monai/apps/pathology/engines/utils.py
index 3a190a146b..895638a01b 100644
--- a/monai/apps/pathology/engines/utils.py
+++ b/monai/apps/pathology/engines/utils.py
@@ -28,7 +28,7 @@ class PrepareBatchHoVerNet(PrepareBatch):

     Args:
         extra_keys: If a sequence of strings is provided, values from the input dictionary are extracted from
-            those keys and passed to the nework as extra positional arguments.
+            those keys and passed to the network as extra positional arguments.
     """

     def __init__(self, extra_keys: Sequence[str]) -> None:
diff --git a/monai/apps/reconstruction/transforms/array.py b/monai/apps/reconstruction/transforms/array.py
index ed58439d29..cd2936de41 100644
--- a/monai/apps/reconstruction/transforms/array.py
+++ b/monai/apps/reconstruction/transforms/array.py
@@ -50,7 +50,7 @@ def __init__(
             same length as center_fractions. If multiple values are provided,
             then one of these is chosen uniformly each time.
         spatial_dims: Number of spatial dims (e.g., it's 2 for a 2D data;
-            it's also 2 for psuedo-3D datasets like the fastMRI dataset).
+            it's also 2 for pseudo-3D datasets like the fastMRI dataset).
             The last spatial dim is selected for sampling. For the fastMRI
             dataset, k-space has the form (...,num_slices,num_coils,H,W)
             and sampling is done along W. For a general 3D data with the
diff --git a/monai/apps/reconstruction/transforms/dictionary.py b/monai/apps/reconstruction/transforms/dictionary.py
index baa9bdb2ce..4f3a2e03cf 100644
--- a/monai/apps/reconstruction/transforms/dictionary.py
+++ b/monai/apps/reconstruction/transforms/dictionary.py
@@ -83,7 +83,7 @@ class RandomKspaceMaskd(RandomizableTransform, MapTransform):
             same length as center_fractions. If multiple values are provided,
             then one of these is chosen uniformly each time.
         spatial_dims: Number of spatial dims (e.g., it's 2 for a 2D data; it's
-            also 2 for psuedo-3D datasets like the fastMRI dataset).
+            also 2 for pseudo-3D datasets like the fastMRI dataset).
             The last spatial dim is selected for sampling. For the fastMRI
             dataset, k-space has the form (...,num_slices,num_coils,H,W)
             and sampling is done along W. For a general 3D data with the
@@ -151,7 +151,7 @@ class EquispacedKspaceMaskd(RandomKspaceMaskd):
             length as center_fractions. If multiple values are provided, then
             one of these is chosen uniformly each time.
         spatial_dims: Number of spatial dims (e.g., it's 2 for a 2D data;
-            it's also 2 for psuedo-3D datasets like the fastMRI dataset).
+            it's also 2 for pseudo-3D datasets like the fastMRI dataset).
             The last spatial dim is selected for sampling. For the fastMRI
             dataset, k-space has the form (...,num_slices,num_coils,H,W) and
             sampling is done along W. For a general 3D data with the shape
diff --git a/monai/bundle/scripts.py b/monai/bundle/scripts.py
index d61d4105ec..cf9be1f98d 100644
--- a/monai/bundle/scripts.py
+++ b/monai/bundle/scripts.py
@@ -366,7 +366,7 @@ def get_all_bundles_list(
     """
     Get all bundles names (and the latest versions) that are stored in the release of specified repository
     with the provided tag. The default values of arguments correspond to the release of MONAI model zoo.
-    In order to increase the rate limits of calling GIthub APIs, you can input your personal access token.
+    In order to increase the rate limits of calling Github APIs, you can input your personal access token.
     Please check the following link for more details about rate limiting:
     https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
@@ -401,7 +401,7 @@ def get_bundle_versions(
    """
    Get the latest version, as well as all existing versions of a bundle that is stored in the release of
    specified repository with the provided tag.
-    In order to increase the rate limits of calling GIthub APIs, you can input your personal access token.
+    In order to increase the rate limits of calling Github APIs, you can input your personal access token.
    Please check the following link for more details about rate limiting:
    https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
@@ -439,7 +439,7 @@ def get_bundle_info(
    Get all information (include "id", "name", "size", "download_count", "browser_download_url", "created_at",
    "updated_at") of a bundle with the specified bundle name and version.
-    In order to increase the rate limits of calling GIthub APIs, you can input your personal access token.
+    In order to increase the rate limits of calling Github APIs, you can input your personal access token.
    Please check the following link for more details about rate limiting:
    https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
@@ -544,7 +544,7 @@ def run(
        logging_file: config file for `logging` module in the program, default to `None`. for more details:
            https://docs.python.org/3/library/logging.config.html#logging.config.fileConfig.
        tracking: enable the experiment tracking feature at runtime with optionally configurable and extensible.
-            if "mlflow", will add `MLFlowHandler` to the parsed bundle with default loggging settings,
+            if "mlflow", will add `MLFlowHandler` to the parsed bundle with default logging settings,
            if other string, treat it as file path to load the logging settings, if `dict`,
            treat it as logging settings, otherwise, use all the default settings.
            will patch the target config content with `tracking handlers` and the top-level items of `configs`.
@@ -925,6 +925,8 @@ def init_bundle(
        network: if given instead of ckpt_file this network's weights will be stored in bundle
        dataset_license: if `True`, a default license file called "data_license.txt" will be produced.
            This file is required if there are any license conditions stated for data your bundle uses.
+        metadata_str: optional metadata string to write to bundle, if not given a default will be used.
+        inference_str: optional inference string to write to bundle, if not given a default will be used.
    """
    if metadata_str is None:
        metadata_str = DEFAULT_METADATA
diff --git a/monai/bundle/utils.py b/monai/bundle/utils.py
index 77112b0db3..33ff3ff28f 100644
--- a/monai/bundle/utils.py
+++ b/monai/bundle/utils.py
@@ -153,7 +153,7 @@
     },
 }

-DEFAULT_EXP_MGMT_SETTINGS = {"mlflow": DEFAULT_MLFLOW_SETTINGS}  # default expriment management settings
+DEFAULT_EXP_MGMT_SETTINGS = {"mlflow": DEFAULT_MLFLOW_SETTINGS}  # default experiment management settings


 def load_bundle_config(bundle_path: str, *config_names, **load_kw_args) -> Any:
diff --git a/monai/data/box_utils.py b/monai/data/box_utils.py
index a1e321b623..162c7cae26 100644
--- a/monai/data/box_utils.py
+++ b/monai/data/box_utils.py
@@ -1055,7 +1055,7 @@ def non_max_suppression(
     boxes_t, *_ = convert_data_type(boxes, torch.Tensor)
     scores_t, *_ = convert_to_dst_type(scores, boxes_t)

-    # sort boxes in desending order according to the scores
+    # sort boxes in descending order according to the scores
     sort_idxs = torch.argsort(scores_t, dim=0, descending=True)
     boxes_sort = deepcopy(boxes_t)[sort_idxs, :]
diff --git a/monai/data/dataset.py b/monai/data/dataset.py
index e9289b2882..1e9d67f358 100644
--- a/monai/data/dataset.py
+++ b/monai/data/dataset.py
@@ -978,10 +978,10 @@ class SmartCacheDataset(Randomizable, CacheDataset):
            will take the minimum of (cache_num, data_length x cache_rate, data_length).
        num_init_workers: the number of worker threads to initialize the cache for first epoch.
            If num_init_workers is None then the number returned by os.cpu_count() is used.
-            If a value less than 1 is speficied, 1 will be used instead.
+            If a value less than 1 is specified, 1 will be used instead.
        num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
            If num_replace_workers is None then the number returned by os.cpu_count() is used.
-            If a value less than 1 is speficied, 1 will be used instead.
+            If a value less than 1 is specified, 1 will be used instead.
        progress: whether to display a progress bar when caching for the first epoch.
        shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
            it will not modify the original input data sequence in-place.
diff --git a/monai/data/fft_utils.py b/monai/data/fft_utils.py
index 2d38beed5e..19083aa711 100644
--- a/monai/data/fft_utils.py
+++ b/monai/data/fft_utils.py
@@ -19,7 +19,7 @@
 def ifftn_centered(ksp: NdarrayOrTensor, spatial_dims: int, is_complex: bool = True) -> NdarrayOrTensor:
     """
     Pytorch-based ifft for spatial_dims-dim signals. "centered" means this function automatically takes care
-    of the required ifft and fft shifts. This function calls monai.metworks.blocks.fft_utils_t.ifftn_centered_t.
+    of the required ifft and fft shifts. This function calls monai.networks.blocks.fft_utils_t.ifftn_centered_t.
     This is equivalent to do fft in numpy based on numpy.fft.ifftn, numpy.fft.fftshift, and numpy.fft.ifftshift

     Args:
@@ -58,7 +58,7 @@ def ifftn_centered(ksp: NdarrayOrTensor, spatial_dims: int, is_complex: bool = T
 def fftn_centered(im: NdarrayOrTensor, spatial_dims: int, is_complex: bool = True) -> NdarrayOrTensor:
     """
     Pytorch-based fft for spatial_dims-dim signals. "centered" means this function automatically takes care
-    of the required ifft and fft shifts. This function calls monai.metworks.blocks.fft_utils_t.fftn_centered_t.
+    of the required ifft and fft shifts. This function calls monai.networks.blocks.fft_utils_t.fftn_centered_t.
     This is equivalent to do ifft in numpy based on numpy.fft.fftn, numpy.fft.fftshift, and numpy.fft.ifftshift

     Args:
diff --git a/monai/data/image_writer.py b/monai/data/image_writer.py
index 935941bc26..8d42d032c9 100644
--- a/monai/data/image_writer.py
+++ b/monai/data/image_writer.py
@@ -784,7 +784,7 @@ def resample_and_clip(

         Args:
             data_array: input data array. This method assumes the 'channel-last' format.
             output_spatial_shape: output spatial shape.
-            mode: interpolation mode, defautl is ``InterpolateMode.BICUBIC``.
+            mode: interpolation mode, default is ``InterpolateMode.BICUBIC``.
         """
         data: np.ndarray = convert_data_type(data_array, np.ndarray)[0]
diff --git a/monai/data/wsi_reader.py b/monai/data/wsi_reader.py
index 0d3924182c..d18d40935a 100644
--- a/monai/data/wsi_reader.py
+++ b/monai/data/wsi_reader.py
@@ -616,7 +616,7 @@ def get_mpp(self, wsi, level: Optional[int] = None) -> Tuple[float, float]:
             unit = wsi.properties["tiff.ResolutionUnit"]
             if unit == "centimeter":
                 factor = 10000.0
-            elif unit == "milimeter":
+            elif unit == "millimeter":
                 factor = 1000.0
             elif unit == "micrometer":
                 factor = 1.0
diff --git a/monai/engines/utils.py b/monai/engines/utils.py
index 34a04bc13b..9d19737ab5 100644
--- a/monai/engines/utils.py
+++ b/monai/engines/utils.py
@@ -108,7 +108,7 @@ def default_prepare_batch(
     If `CommonKeys.REALS` is present this is returned with None. All returned tensors are moved to the given
     device using the given non-blocking argument before being returned.

-    This function implemenets the expected API for a `prepare_batch` callable in Ignite:
+    This function implements the expected API for a `prepare_batch` callable in Ignite:
     https://pytorch.org/ignite/v0.4.8/generated/ignite.engine.create_supervised_trainer.html

     Args:
@@ -193,7 +193,7 @@ class PrepareBatchExtraInput(PrepareBatch):

     Args:
         extra_keys: If a string or sequence of strings is provided, values from the input dictionary are extracted from
-            those keys and passed to the nework as extra positional arguments. If a dictionary is provided, every pair
+            those keys and passed to the network as extra positional arguments. If a dictionary is provided, every pair
             `(k, v)` in that dictionary will become a new keyword argument assigning to `k` the value in the input
             dictionary keyed to `v`.
     """
diff --git a/monai/fl/client/monai_algo.py b/monai/fl/client/monai_algo.py
index 4d589e4482..2cdabdca9a 100644
--- a/monai/fl/client/monai_algo.py
+++ b/monai/fl/client/monai_algo.py
@@ -347,7 +347,7 @@ class MonaiAlgo(ClientAlgo, MonaiAlgoStats):
         backend: backend to use for torch.distributed; defaults to "nccl".
         init_method: init_method for torch.distributed; defaults to "env://".
         tracking: enable the experiment tracking feature at runtime with optionally configurable and extensible.
-            if "mlflow", will add `MLFlowHandler` to the parsed bundle with default loggging settings,
+            if "mlflow", will add `MLFlowHandler` to the parsed bundle with default logging settings,
             if other string, treat it as file path to load the logging settings, if `dict`,
             treat it as logging settings, otherwise, use all the default settings.
             will patch the target config content with `tracking handlers` and the top-level items of `configs`.
diff --git a/monai/handlers/mlflow_handler.py b/monai/handlers/mlflow_handler.py
index 961b91f087..ee63c951a8 100644
--- a/monai/handlers/mlflow_handler.py
+++ b/monai/handlers/mlflow_handler.py
@@ -80,7 +80,7 @@ class MLFlowHandler:
         experiment_param: a dict recording parameters which will not change through whole experiment, like
             torch version, cuda version and so on.
         artifacts: paths to images that need to be recorded after a whole run.
-        optimizer_param_names: parameters' name in optimizer that need to be record during runing,
+        optimizer_param_names: parameters' name in optimizer that need to be record during running,
             defaults to "lr".
         close_on_complete: whether to close the mlflow run in `complete` phase in workflow, default to False.
diff --git a/monai/handlers/panoptic_quality.py b/monai/handlers/panoptic_quality.py
index d9e5beec59..ffa0aee03a 100644
--- a/monai/handlers/panoptic_quality.py
+++ b/monai/handlers/panoptic_quality.py
@@ -39,10 +39,10 @@ def __init__(
             reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
                 available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
                 ``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.
-            match_iou_threshold: IOU threshould to determine the pairing between `y_pred` and `y`. Usually,
+            match_iou_threshold: IOU threshold to determine the pairing between `y_pred` and `y`. Usually,
                it should >= 0.5, the pairing between instances of `y_pred` and `y` are identical.
                If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
-               maximal amout of unique pairing.
+               maximal amount of unique pairing.
            smooth_numerator: a small constant added to the numerator to avoid zero.
            output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
                construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
diff --git a/monai/inferers/inferer.py b/monai/inferers/inferer.py
index b9fa143a43..d0d2f932b5 100644
--- a/monai/inferers/inferer.py
+++ b/monai/inferers/inferer.py
@@ -123,7 +123,7 @@ class SlidingWindowInferer(Inferer):
         progress: whether to print a tqdm progress bar.
         cache_roi_weight_map: whether to precompute the ROI weight map.
         cpu_thresh: when provided, dynamically switch to stitching on cpu (to save gpu memory)
-            when input image volume is larger than this threshold (in pixels/volxels).
+            when input image volume is larger than this threshold (in pixels/voxels).
             Otherwise use ``"device"``. Thus, the output may end-up on either cpu or gpu.

     Note:
diff --git a/monai/metrics/generalized_dice.py b/monai/metrics/generalized_dice.py
index 3a0e90d587..7fdfc61a14 100644
--- a/monai/metrics/generalized_dice.py
+++ b/monai/metrics/generalized_dice.py
@@ -174,7 +174,7 @@ def compute_generalized_dice(
     # Compute the score
     generalized_dice_score = numer / denom

-    # Handle zero deivision. Where denom == 0 and the prediction volume is 0, score is 1.
+    # Handle zero division. Where denom == 0 and the prediction volume is 0, score is 1.
     # Where denom == 0 but the prediction volume is not 0, score is 0
     y_pred_o = y_pred_o.sum(dim=-1)
     denom_zeros = denom == 0
diff --git a/monai/metrics/panoptic_quality.py b/monai/metrics/panoptic_quality.py
index 4bf87188d5..dc8cfb84b2 100644
--- a/monai/metrics/panoptic_quality.py
+++ b/monai/metrics/panoptic_quality.py
@@ -48,10 +48,10 @@ class PanopticQualityMetric(CumulativeIterationMetric):
         reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
             available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
             ``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.
-        match_iou_threshold: IOU threshould to determine the pairing between `y_pred` and `y`. Usually,
+        match_iou_threshold: IOU threshold to determine the pairing between `y_pred` and `y`. Usually,
            it should >= 0.5, the pairing between instances of `y_pred` and `y` are identical.
            If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
-           maximal amout of unique pairing.
+           maximal amount of unique pairing.
        smooth_numerator: a small constant added to the numerator to avoid zero.

    """
@@ -171,10 +171,10 @@ def compute_panoptic_quality(
        gt: ground truth. It must have the same shape as `pred` and have integer type.
        metric_name: output metric. The value can be "pq", "sq" or "rq".
        remap: whether to remap `pred` and `gt` to ensure contiguous ordering of instance id.
-        match_iou_threshold: IOU threshould to determine the pairing between `pred` and `gt`. Usually,
+        match_iou_threshold: IOU threshold to determine the pairing between `pred` and `gt`. Usually,
            it should >= 0.5, the pairing between instances of `pred` and `gt` are identical.
            If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
-           maximal amout of unique pairing.
+           maximal amount of unique pairing.
        smooth_numerator: a small constant added to the numerator to avoid zero.

    Raises:
diff --git a/monai/metrics/utils.py b/monai/metrics/utils.py
index 0c06c7768a..6219663756 100644
--- a/monai/metrics/utils.py
+++ b/monai/metrics/utils.py
@@ -232,7 +232,7 @@ def remap_instance_id(pred: torch.Tensor, by_size: bool = False):
     Args:
         pred: segmentation predictions in the form of torch tensor. Each value of the tensor should be an integer,
             and represents the prediction of its corresponding instance id.
-        by_size: if True, larget instance will be assigned a smaller id.
+        by_size: if True, largest instance will be assigned a smaller id.

     """
     pred_id = list(pred.unique())
diff --git a/monai/networks/blocks/encoder.py b/monai/networks/blocks/encoder.py
index b19317a2af..9d4dac8f57 100644
--- a/monai/networks/blocks/encoder.py
+++ b/monai/networks/blocks/encoder.py
@@ -19,7 +19,7 @@ class BaseEncoder(metaclass=ABCMeta):
     """
     Abstract class defines interface of encoders in flexible unet.
     Encoders in flexible unet must derive from this class. Each interface method
-    should return a list containing relative information about a series of newtworks
+    should return a list containing relative information about a series of networks
     defined by encoder. For example, the efficient-net encoder implement 10 basic
     network structures in one encoder. When calling `get_encoder_name_string_list`
     function, a string list like ["efficientnet-b0", "efficientnet-b1" ... "efficientnet-l2"]
@@ -79,6 +79,6 @@ def get_encoder_names(cls) -> List[str]:
         given different initialization parameters. And a name string is
         the key to each encoder in flexible unet backbone registry. Therefore
         this method should return every encoder name that needs
-        to be registed in flexible unet.
+        to be registered in flexible unet.
         """
         raise NotImplementedError
diff --git a/monai/networks/nets/flexible_unet.py b/monai/networks/nets/flexible_unet.py
index 1bd4ac7c9c..28e0cedaa0 100644
--- a/monai/networks/nets/flexible_unet.py
+++ b/monai/networks/nets/flexible_unet.py
@@ -38,9 +38,9 @@ class FlexUNetEncoderRegister:
     def __init__(self):
         self.register_dict = {}

-    def regist_class(self, name: Union[Type, str]):
+    def register_class(self, name: Union[Type, str]):
         """
-        Regist a given class to the encoder dict. Please notice that input class must be a
+        Register a given class to the encoder dict. Please notice that input class must be a
         subclass of BaseEncoder.
         """
         if isinstance(name, str):
@@ -74,7 +74,7 @@ def regist_class(self, name: Union[Type, str]):


 FLEXUNET_BACKBONE = FlexUNetEncoderRegister()
-FLEXUNET_BACKBONE.regist_class(EfficientNetEncoder)
+FLEXUNET_BACKBONE.register_class(EfficientNetEncoder)


 class UNetDecoder(nn.Module):
@@ -264,7 +264,7 @@ def __init__(
             interp_mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``}
                 Only used in the "nontrainable" mode.
             is_pad: whether to pad upsampling features to fit features from encoder. Default to True.
-                If this parameter is set to "True", the spatial dim of network input can be arbitary
+                If this parameter is set to "True", the spatial dim of network input can be arbitrary
                 size, which is not supported by TensorRT. Otherwise, it must be a multiple of 32.
         """
         super().__init__()
diff --git a/monai/networks/nets/segresnet_ds.py b/monai/networks/nets/segresnet_ds.py
index a440e28ba7..15129a7996 100644
--- a/monai/networks/nets/segresnet_ds.py
+++ b/monai/networks/nets/segresnet_ds.py
@@ -250,7 +250,7 @@ class SegResNetDS(nn.Module):
         dsdepth: number of levels for deep supervision. This will be the length of the list of outputs at each scale level.
             At dsdepth==1,only a single output is returned.
         preprocess: optional callable function to apply before the model's forward pass
-        resolution: optional input image resolution. When provided, the nework will first use non-isotropic kernels to bring
+        resolution: optional input image resolution. When provided, the network will first use non-isotropic kernels to bring
             image spacing into an approximately isotropic space.
             Otherwise, by default, the kernel size and downsampling is always isotropic.
@@ -384,7 +384,7 @@ def shape_factor(self):

     def is_valid_shape(self, x):
         """
-        Calculate if the input shape is divisible by the minimum factors for the current nework configuration
+        Calculate if the input shape is divisible by the minimum factors for the current network configuration
         """
         a = [i % j == 0 for i, j in zip(x.shape[2:], self.shape_factor())]
         return all(a)
diff --git a/monai/optimizers/lr_scheduler.py b/monai/optimizers/lr_scheduler.py
index cb047f8bc5..dc76a3dda1 100644
--- a/monai/optimizers/lr_scheduler.py
+++ b/monai/optimizers/lr_scheduler.py
@@ -77,7 +77,7 @@ def __init__(
             t_total: total number of training iterations.
             cycles: cosine cycles parameter.
             last_epoch: the index of last epoch.
-            warmup_multiplier: if provided, starts the linear warmup from this fraction of the intial lr.
+            warmup_multiplier: if provided, starts the linear warmup from this fraction of the initial lr.
                 Must be in 0..1 interval. Defaults to 0
         Returns:
             None
diff --git a/monai/utils/enums.py b/monai/utils/enums.py
index b606cd8667..4352d83473 100644
--- a/monai/utils/enums.py
+++ b/monai/utils/enums.py
@@ -389,7 +389,7 @@ class TransformBackends(StrEnum):
     ``monai.data.MetaTensor``. Internally, some transforms are made by converting the data into ``numpy.array`` or
     ``cupy.array`` and use the underlying transform backend API to achieve the actual output array and
     converting back to ``Tensor``/``MetaTensor``. Transforms with more than one backend indicate the that they may
-    convert the input data types to accomodate the underlying API.
+    convert the input data types to accommodate the underlying API.
     """

     TORCH = "torch"
diff --git a/monai/utils/misc.py b/monai/utils/misc.py
index 3569a76276..c751ad3b49 100644
--- a/monai/utils/misc.py
+++ b/monai/utils/misc.py
@@ -532,7 +532,7 @@ def save_obj(
     Args:
         obj: input object data to save.
         path: target file path to save the input object.
-        create_dir: whether to create dictionary of the path if not existng, default to `True`.
+        create_dir: whether to create dictionary of the path if not existing, default to `True`.
         atomic: if `True`, state is serialized to a temporary file first, then move to final destination.
             so that files are guaranteed to not be damaged if exception occurs. default to `True`.
         func: the function to save file, if None, default to `torch.save`.
diff --git a/monai/visualize/occlusion_sensitivity.py b/monai/visualize/occlusion_sensitivity.py
index 03c69f8978..61413c038c 100644
--- a/monai/visualize/occlusion_sensitivity.py
+++ b/monai/visualize/occlusion_sensitivity.py
@@ -327,7 +327,7 @@ def __call__(
         add, mul = None, None

         with eval_mode(self.nn_module):
-            # needs to go here to avoid cirular import
+            # needs to go here to avoid circular import
             from monai.inferers import sliding_window_inference

             sensitivity_im: MetaTensor = sliding_window_inference(  # type: ignore
diff --git a/tests/test_flexible_unet.py b/tests/test_flexible_unet.py
index 7960f35925..123d494e9a 100644
--- a/tests/test_flexible_unet.py
+++ b/tests/test_flexible_unet.py
@@ -153,8 +153,8 @@ def forward(self, x: torch.Tensor):
         return feature_list


-FLEXUNET_BACKBONE.regist_class(ResNetEncoder)
-FLEXUNET_BACKBONE.regist_class(DummyEncoder)
+FLEXUNET_BACKBONE.register_class(ResNetEncoder)
+FLEXUNET_BACKBONE.register_class(DummyEncoder)


 def get_model_names():
@@ -405,7 +405,7 @@ class TestFlexUNetEncoderRegister(unittest.TestCase):
     @parameterized.expand(CASE_REGISTER_ENCODER)
     def test_regist(self, encoder):
         tmp_backbone = FlexUNetEncoderRegister()
-        tmp_backbone.regist_class(encoder)
+        tmp_backbone.register_class(encoder)
         for backbone in tmp_backbone.register_dict:
             backbone_type = tmp_backbone.register_dict[backbone]["type"]
             feature_number = backbone_type.num_outputs()