2 changes: 1 addition & 1 deletion monai/apps/detection/networks/retinanet_detector.py
@@ -604,7 +604,7 @@ def postprocess_detections(
) -> List[Dict[str, Tensor]]:
"""
Postprocessing to generate detection result from classification logits and box regression.
-Use self.box_selector to select the final outut boxes for each image.
+Use self.box_selector to select the final output boxes for each image.

Args:
head_outputs_reshape: reshaped head_outputs. ``head_output_reshape[self.cls_key]`` is a Tensor
12 changes: 6 additions & 6 deletions monai/apps/nuclick/transforms.py
@@ -315,21 +315,21 @@ def __call__(self, data):
mask = d[self.label] if isinstance(d[self.label], torch.Tensor) else torch.from_numpy(d[self.label])

inc_sig = self.inclusion_map(mask[0], dtype=image.dtype)
-inc_sig = self._apply_gaussion(inc_sig)
+inc_sig = self._apply_gaussian(inc_sig)
if self.add_exclusion_map:
others = d[self.others] if isinstance(d[self.others], torch.Tensor) else torch.from_numpy(d[self.others])
exc_sig = self.exclusion_map(
others[0], dtype=image.dtype, drop_rate=self.drop_rate, jitter_range=self.jitter_range
)
-exc_sig = self._apply_gaussion(exc_sig)
+exc_sig = self._apply_gaussian(exc_sig)
image = torch.cat((image, inc_sig[None], exc_sig[None]), dim=0)
else:
image = torch.cat((image, inc_sig[None]), dim=0)

d[self.image] = image if isinstance(d[self.image], torch.Tensor) else convert_to_numpy(image)
return d

-def _apply_gaussion(self, t):
+def _apply_gaussian(self, t):
if not self.gaussian or torch.count_nonzero(t) == 0:
return t
x = GaussianFilter(spatial_dims=2, truncated=self.truncated, sigma=self.sigma)(t.unsqueeze(0).unsqueeze(0))
@@ -500,19 +500,19 @@ def get_patches_and_signals(self, img, click_map, bounding_boxes, cx, cy, x, y):
this_click_map[cx[i], cy[i]] = 1

nuc_points = this_click_map[x_start:x_end, y_start:y_end]
-nuc_points = self._apply_gaussion(nuc_points)
+nuc_points = self._apply_gaussian(nuc_points)

if self.add_exclusion_map:
others_click_map = ((click_map - this_click_map) > 0).type(img.dtype)
other_points = others_click_map[x_start:x_end, y_start:y_end]
-other_points = self._apply_gaussion(other_points)
+other_points = self._apply_gaussian(other_points)
patches.append(torch.cat([patch, nuc_points[None], other_points[None]]))
else:
patches.append(torch.cat([patch, nuc_points[None]]))

return torch.stack(patches)

-def _apply_gaussion(self, t):
+def _apply_gaussian(self, t):
if not self.gaussian or torch.count_nonzero(t) == 0:
return t
x = GaussianFilter(spatial_dims=2, truncated=self.truncated, sigma=self.sigma)(t.unsqueeze(0).unsqueeze(0))
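For context, the renamed helper smooths a sparse 2D click/signal map with MONAI's `GaussianFilter`. A minimal sketch of the idea (the `sigma`/`truncated` defaults and the final squeeze back to 2D are illustrative assumptions, not the transform's exact values):

```python
import torch
from monai.networks.layers import GaussianFilter

def apply_gaussian(t: torch.Tensor, sigma: float = 1.0, truncated: float = 4.0) -> torch.Tensor:
    # empty maps are returned unchanged, as in the transform above
    if torch.count_nonzero(t) == 0:
        return t
    # GaussianFilter expects (batch, channel, H, W), hence the two unsqueezes
    x = GaussianFilter(spatial_dims=2, truncated=truncated, sigma=sigma)(t.unsqueeze(0).unsqueeze(0))
    return x.squeeze(0).squeeze(0)  # back to (H, W)
```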
2 changes: 1 addition & 1 deletion monai/apps/pathology/engines/utils.py
@@ -28,7 +28,7 @@ class PrepareBatchHoVerNet(PrepareBatch):

Args:
extra_keys: If a sequence of strings is provided, values from the input dictionary are extracted from
-those keys and passed to the nework as extra positional arguments.
+those keys and passed to the network as extra positional arguments.
"""

def __init__(self, extra_keys: Sequence[str]) -> None:
2 changes: 1 addition & 1 deletion monai/apps/reconstruction/transforms/array.py
@@ -50,7 +50,7 @@ def __init__(
same length as center_fractions. If multiple values are
provided, then one of these is chosen uniformly each time.
spatial_dims: Number of spatial dims (e.g., it's 2 for a 2D data;
-it's also 2 for psuedo-3D datasets like the fastMRI dataset).
+it's also 2 for pseudo-3D datasets like the fastMRI dataset).
The last spatial dim is selected for sampling. For the fastMRI
dataset, k-space has the form (...,num_slices,num_coils,H,W)
and sampling is done along W. For a general 3D data with the
4 changes: 2 additions & 2 deletions monai/apps/reconstruction/transforms/dictionary.py
@@ -83,7 +83,7 @@ class RandomKspaceMaskd(RandomizableTransform, MapTransform):
same length as center_fractions. If multiple values are provided,
then one of these is chosen uniformly each time.
spatial_dims: Number of spatial dims (e.g., it's 2 for a 2D data; it's
-also 2 for psuedo-3D datasets like the fastMRI dataset).
+also 2 for pseudo-3D datasets like the fastMRI dataset).
The last spatial dim is selected for sampling. For the fastMRI
dataset, k-space has the form (...,num_slices,num_coils,H,W)
and sampling is done along W. For a general 3D data with the
@@ -151,7 +151,7 @@ class EquispacedKspaceMaskd(RandomKspaceMaskd):
length as center_fractions. If multiple values are provided,
then one of these is chosen uniformly each time.
spatial_dims: Number of spatial dims (e.g., it's 2 for a 2D data;
-it's also 2 for psuedo-3D datasets like the fastMRI dataset).
+it's also 2 for pseudo-3D datasets like the fastMRI dataset).
The last spatial dim is selected for sampling. For the fastMRI
dataset, k-space has the form (...,num_slices,num_coils,H,W)
and sampling is done along W. For a general 3D data with the shape
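Both masking transforms sample along the last spatial dim (W), broadcasting the mask over the leading dims. A hedged numpy sketch of that convention with an illustrative equispaced-plus-center pattern (not the transforms' actual mask generation):

```python
import numpy as np

# pseudo-3D fastMRI-style k-space: (num_slices, num_coils, H, W)
ksp = np.random.randn(4, 8, 64, 64) + 1j * np.random.randn(4, 8, 64, 64)

mask = np.zeros(64)
mask[::4] = 1    # equispaced lines (acceleration 4, illustrative)
mask[28:36] = 1  # fully sampled center fraction (illustrative)

masked_ksp = ksp * mask  # broadcasts over the last axis (W)
```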
10 changes: 6 additions & 4 deletions monai/bundle/scripts.py
@@ -366,7 +366,7 @@ def get_all_bundles_list(
"""
Get all bundles names (and the latest versions) that are stored in the release of specified repository
with the provided tag. The default values of arguments correspond to the release of MONAI model zoo.
-In order to increase the rate limits of calling GIthub APIs, you can input your personal access token.
+In order to increase the rate limits of calling Github APIs, you can input your personal access token.
Please check the following link for more details about rate limiting:
https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting

@@ -401,7 +401,7 @@ def get_bundle_versions(
"""
Get the latest version, as well as all existing versions of a bundle that is stored in the release of specified
repository with the provided tag.
-In order to increase the rate limits of calling GIthub APIs, you can input your personal access token.
+In order to increase the rate limits of calling Github APIs, you can input your personal access token.
Please check the following link for more details about rate limiting:
https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting

@@ -439,7 +439,7 @@ def get_bundle_info(
Get all information
(include "id", "name", "size", "download_count", "browser_download_url", "created_at", "updated_at") of a bundle
with the specified bundle name and version.
-In order to increase the rate limits of calling GIthub APIs, you can input your personal access token.
+In order to increase the rate limits of calling Github APIs, you can input your personal access token.
Please check the following link for more details about rate limiting:
https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting

@@ -544,7 +544,7 @@ def run(
logging_file: config file for `logging` module in the program, default to `None`. for more details:
https://docs.python.org/3/library/logging.config.html#logging.config.fileConfig.
tracking: enable the experiment tracking feature at runtime with optionally configurable and extensible.
if "mlflow", will add `MLFlowHandler` to the parsed bundle with default loggging settings,
if "mlflow", will add `MLFlowHandler` to the parsed bundle with default logging settings,
if other string, treat it as file path to load the logging settings, if `dict`,
treat it as logging settings, otherwise, use all the default settings.
will patch the target config content with `tracking handlers` and the top-level items of `configs`.
@@ -925,6 +925,8 @@ def init_bundle(
network: if given instead of ckpt_file this network's weights will be stored in bundle
dataset_license: if `True`, a default license file called "data_license.txt" will be produced. This
file is required if there are any license conditions stated for data your bundle uses.
+metadata_str: optional metadata string to write to bundle, if not given a default will be used.
+inference_str: optional inference string to write to bundle, if not given a default will be used.
"""
if metadata_str is None:
metadata_str = DEFAULT_METADATA
2 changes: 1 addition & 1 deletion monai/bundle/utils.py
@@ -153,7 +153,7 @@
},
}

-DEFAULT_EXP_MGMT_SETTINGS = {"mlflow": DEFAULT_MLFLOW_SETTINGS} # default expriment management settings
+DEFAULT_EXP_MGMT_SETTINGS = {"mlflow": DEFAULT_MLFLOW_SETTINGS} # default experiment management settings


def load_bundle_config(bundle_path: str, *config_names, **load_kw_args) -> Any:
2 changes: 1 addition & 1 deletion monai/data/box_utils.py
@@ -1055,7 +1055,7 @@ def non_max_suppression(
boxes_t, *_ = convert_data_type(boxes, torch.Tensor)
scores_t, *_ = convert_to_dst_type(scores, boxes_t)

-# sort boxes in desending order according to the scores
+# sort boxes in descending order according to the scores
sort_idxs = torch.argsort(scores_t, dim=0, descending=True)
boxes_sort = deepcopy(boxes_t)[sort_idxs, :]

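The corrected comment describes the first step of non-max suppression. As a minimal sketch with toy boxes and scores (not the library's full routine):

```python
import torch

scores = torch.tensor([0.2, 0.9, 0.5])
boxes = torch.tensor([[0.0, 0.0, 10.0, 10.0],
                      [1.0, 1.0, 11.0, 11.0],
                      [5.0, 5.0, 15.0, 15.0]])

# sort boxes in descending order according to the scores
sort_idxs = torch.argsort(scores, dim=0, descending=True)
boxes_sorted = boxes[sort_idxs]  # highest-scoring box comes first
```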
4 changes: 2 additions & 2 deletions monai/data/dataset.py
@@ -978,10 +978,10 @@ class SmartCacheDataset(Randomizable, CacheDataset):
will take the minimum of (cache_num, data_length x cache_rate, data_length).
num_init_workers: the number of worker threads to initialize the cache for first epoch.
If num_init_workers is None then the number returned by os.cpu_count() is used.
-If a value less than 1 is speficied, 1 will be used instead.
+If a value less than 1 is specified, 1 will be used instead.
num_replace_workers: the number of worker threads to prepare the replacement cache for every epoch.
If num_replace_workers is None then the number returned by os.cpu_count() is used.
-If a value less than 1 is speficied, 1 will be used instead.
+If a value less than 1 is specified, 1 will be used instead.
progress: whether to display a progress bar when caching for the first epoch.
shuffle: whether to shuffle the whole data list before preparing the cache content for first epoch.
it will not modify the original input data sequence in-place.
4 changes: 2 additions & 2 deletions monai/data/fft_utils.py
@@ -19,7 +19,7 @@
def ifftn_centered(ksp: NdarrayOrTensor, spatial_dims: int, is_complex: bool = True) -> NdarrayOrTensor:
"""
Pytorch-based ifft for spatial_dims-dim signals. "centered" means this function automatically takes care
-of the required ifft and fft shifts. This function calls monai.metworks.blocks.fft_utils_t.ifftn_centered_t.
+of the required ifft and fft shifts. This function calls monai.networks.blocks.fft_utils_t.ifftn_centered_t.
This is equivalent to do fft in numpy based on numpy.fft.ifftn, numpy.fft.fftshift, and numpy.fft.ifftshift

Args:
@@ -58,7 +58,7 @@ def ifftn_centered(ksp: NdarrayOrTensor, spatial_dims: int, is_complex: bool = True) -> NdarrayOrTensor:
def fftn_centered(im: NdarrayOrTensor, spatial_dims: int, is_complex: bool = True) -> NdarrayOrTensor:
"""
Pytorch-based fft for spatial_dims-dim signals. "centered" means this function automatically takes care
-of the required ifft and fft shifts. This function calls monai.metworks.blocks.fft_utils_t.fftn_centered_t.
+of the required ifft and fft shifts. This function calls monai.networks.blocks.fft_utils_t.fftn_centered_t.
This is equivalent to do ifft in numpy based on numpy.fft.fftn, numpy.fft.fftshift, and numpy.fft.ifftshift

Args:
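The numpy equivalence both docstrings mention composes the transform with the two shifts so the zero-frequency component stays centered. A hedged 2D sketch (the axes are an assumption):

```python
import numpy as np

ksp = np.random.randn(8, 8) + 1j * np.random.randn(8, 8)
axes = (-2, -1)

# centered ifft: ifftshift -> ifftn -> fftshift
im = np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(ksp, axes=axes), axes=axes), axes=axes)

# centered fft is the mirror composition: ifftshift -> fftn -> fftshift
ksp_back = np.fft.fftshift(np.fft.fftn(np.fft.ifftshift(im, axes=axes), axes=axes), axes=axes)
```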
2 changes: 1 addition & 1 deletion monai/data/image_reader.py
@@ -647,7 +647,7 @@ def _get_meta_dict(self, img) -> Dict:

"""

-metadata = img.to_json_dict()
+metadata = img.to_json_dict(suppress_invalid_tags=True)

if self.prune_metadata:
prune_metadata = {}
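Assuming `img` here is a pydicom `Dataset`, passing `suppress_invalid_tags=True` makes `to_json_dict` skip elements that cannot be serialized to DICOM JSON instead of raising. A hedged usage sketch with a hypothetical file path:

```python
import pydicom

ds = pydicom.dcmread("example.dcm")  # hypothetical path
# unserializable tags are dropped from the JSON dict rather than raising
metadata = ds.to_json_dict(suppress_invalid_tags=True)
```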
2 changes: 1 addition & 1 deletion monai/data/image_writer.py
@@ -784,7 +784,7 @@ def resample_and_clip(
Args:
data_array: input data array. This method assumes the 'channel-last' format.
output_spatial_shape: output spatial shape.
-mode: interpolation mode, defautl is ``InterpolateMode.BICUBIC``.
+mode: interpolation mode, default is ``InterpolateMode.BICUBIC``.
"""

data: np.ndarray = convert_data_type(data_array, np.ndarray)[0]
2 changes: 1 addition & 1 deletion monai/data/wsi_reader.py
@@ -616,7 +616,7 @@ def get_mpp(self, wsi, level: Optional[int] = None) -> Tuple[float, float]:
unit = wsi.properties["tiff.ResolutionUnit"]
if unit == "centimeter":
factor = 10000.0
elif unit == "milimeter":
elif unit == "millimeter":
factor = 1000.0
elif unit == "micrometer":
factor = 1.0
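The corrected unit string selects a unit-to-micrometer factor. A hedged sketch of how such a factor typically yields microns-per-pixel, assuming TIFF resolutions expressed as pixels per `ResolutionUnit` (property names follow the OpenSlide-style keys shown above):

```python
from typing import Tuple

def mpp_from_tiff_properties(properties: dict) -> Tuple[float, float]:
    # convert the resolution unit to micrometers
    factor = {"centimeter": 10000.0, "millimeter": 1000.0, "micrometer": 1.0}[
        properties["tiff.ResolutionUnit"]
    ]
    # resolution is pixels per unit, so microns-per-pixel = factor / resolution
    return (factor / float(properties["tiff.YResolution"]),
            factor / float(properties["tiff.XResolution"]))

# 40000 pixels per centimeter -> 0.25 um per pixel
print(mpp_from_tiff_properties(
    {"tiff.ResolutionUnit": "centimeter", "tiff.XResolution": "40000", "tiff.YResolution": "40000"}
))
```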
4 changes: 2 additions & 2 deletions monai/engines/utils.py
@@ -108,7 +108,7 @@ def default_prepare_batch(
If `CommonKeys.REALS` is present this is returned with None. All returned tensors are moved to the given device
using the given non-blocking argument before being returned.

-This function implemenets the expected API for a `prepare_batch` callable in Ignite:
+This function implements the expected API for a `prepare_batch` callable in Ignite:
https://pytorch.org/ignite/v0.4.8/generated/ignite.engine.create_supervised_trainer.html

Args:
@@ -193,7 +193,7 @@ class PrepareBatchExtraInput(PrepareBatch):

Args:
extra_keys: If a string or sequence of strings is provided, values from the input dictionary are extracted from
-those keys and passed to the nework as extra positional arguments. If a dictionary is provided, every pair
+those keys and passed to the network as extra positional arguments. If a dictionary is provided, every pair
`(k, v)` in that dictionary will become a new keyword argument assigning to `k` the value in the input
dictionary keyed to `v`.
"""
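A minimal sketch of the `extra_keys` semantics described above, with a hypothetical batch and network (not the actual `PrepareBatchExtraInput` code): a sequence of keys becomes extra positional arguments, a dict becomes keyword arguments:

```python
import torch

batch = {
    "image": torch.rand(2, 1, 8, 8),
    "mask": torch.rand(2, 1, 8, 8),
    "coords": torch.rand(2, 2),
}

def network(image, mask, coords=None):  # hypothetical signature
    return image

# sequence form: extra_keys = ["mask"] -> extra positional arguments
args = [batch[k] for k in ["mask"]]
# dict form: extra_keys = {"coords": "coords"} -> keyword arguments
kwargs = {k: batch[v] for k, v in {"coords": "coords"}.items()}

out = network(batch["image"], *args, **kwargs)
```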
2 changes: 1 addition & 1 deletion monai/fl/client/monai_algo.py
@@ -347,7 +347,7 @@ class MonaiAlgo(ClientAlgo, MonaiAlgoStats):
backend: backend to use for torch.distributed; defaults to "nccl".
init_method: init_method for torch.distributed; defaults to "env://".
tracking: enable the experiment tracking feature at runtime with optionally configurable and extensible.
if "mlflow", will add `MLFlowHandler` to the parsed bundle with default loggging settings,
if "mlflow", will add `MLFlowHandler` to the parsed bundle with default logging settings,
if other string, treat it as file path to load the logging settings, if `dict`,
treat it as logging settings, otherwise, use all the default settings.
will patch the target config content with `tracking handlers` and the top-level items of `configs`.
2 changes: 1 addition & 1 deletion monai/handlers/mlflow_handler.py
@@ -80,7 +80,7 @@ class MLFlowHandler:
experiment_param: a dict recording parameters which will not change through whole experiment,
like torch version, cuda version and so on.
artifacts: paths to images that need to be recorded after a whole run.
-optimizer_param_names: parameters' name in optimizer that need to be record during runing,
+optimizer_param_names: parameters' name in optimizer that need to be record during running,
defaults to "lr".
close_on_complete: whether to close the mlflow run in `complete` phase in workflow, default to False.

4 changes: 2 additions & 2 deletions monai/handlers/panoptic_quality.py
@@ -39,10 +39,10 @@ def __init__(
reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.
-match_iou_threshold: IOU threshould to determine the pairing between `y_pred` and `y`. Usually,
+match_iou_threshold: IOU threshold to determine the pairing between `y_pred` and `y`. Usually,
it should >= 0.5, the pairing between instances of `y_pred` and `y` are identical.
If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
-maximal amout of unique pairing.
+maximal amount of unique pairing.
smooth_numerator: a small constant added to the numerator to avoid zero.
output_transform: callable to extract `y_pred` and `y` from `ignite.engine.state.output` then
construct `(y_pred, y)` pair, where `y_pred` and `y` can be `batch-first` Tensors or
2 changes: 1 addition & 1 deletion monai/inferers/inferer.py
@@ -123,7 +123,7 @@ class SlidingWindowInferer(Inferer):
progress: whether to print a tqdm progress bar.
cache_roi_weight_map: whether to precompute the ROI weight map.
cpu_thresh: when provided, dynamically switch to stitching on cpu (to save gpu memory)
-when input image volume is larger than this threshold (in pixels/volxels).
+when input image volume is larger than this threshold (in pixels/voxels).
Otherwise use ``"device"``. Thus, the output may end-up on either cpu or gpu.

Note:
2 changes: 1 addition & 1 deletion monai/losses/contrastive.py
@@ -9,7 +9,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-from distutils.log import warn
+from warnings import warn

import torch
from torch.nn import functional as F
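`distutils` is deprecated (PEP 632) and removed in Python 3.12, so the standard-library `warnings.warn` is the drop-in replacement here. Usage, with an illustrative message:

```python
from warnings import warn

warn("Batch size of 1 detected; contrastive loss may be unstable.", UserWarning)
```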
2 changes: 1 addition & 1 deletion monai/metrics/generalized_dice.py
@@ -174,7 +174,7 @@ def compute_generalized_dice(
# Compute the score
generalized_dice_score = numer / denom

-# Handle zero deivision. Where denom == 0 and the prediction volume is 0, score is 1.
+# Handle zero division. Where denom == 0 and the prediction volume is 0, score is 1.
# Where denom == 0 but the prediction volume is not 0, score is 0
y_pred_o = y_pred_o.sum(dim=-1)
denom_zeros = denom == 0
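The corrected comment encodes a two-branch rule for zero denominators. A hedged torch sketch of that rule in isolation (names mirror the snippet; the guard against dividing by zero is an illustrative stand-in for the function's own handling):

```python
import torch

def resolve_zero_division(numer: torch.Tensor, denom: torch.Tensor, y_pred_o: torch.Tensor) -> torch.Tensor:
    # avoid nan where denom == 0; those entries are overwritten below
    score = numer / torch.where(denom == 0, torch.ones_like(denom), denom)
    denom_zeros = denom == 0
    # denom == 0 and empty prediction: both sides empty -> perfect score of 1
    score = torch.where(denom_zeros & (y_pred_o == 0), torch.ones_like(score), score)
    # denom == 0 but non-empty prediction: spurious foreground -> score of 0
    score = torch.where(denom_zeros & (y_pred_o != 0), torch.zeros_like(score), score)
    return score
```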
8 changes: 4 additions & 4 deletions monai/metrics/panoptic_quality.py
@@ -48,10 +48,10 @@ class PanopticQualityMetric(CumulativeIterationMetric):
reduction: define mode of reduction to the metrics, will only apply reduction on `not-nan` values,
available reduction modes: {``"none"``, ``"mean"``, ``"sum"``, ``"mean_batch"``, ``"sum_batch"``,
``"mean_channel"``, ``"sum_channel"``}, default to `self.reduction`. if "none", will not do reduction.
-match_iou_threshold: IOU threshould to determine the pairing between `y_pred` and `y`. Usually,
+match_iou_threshold: IOU threshold to determine the pairing between `y_pred` and `y`. Usually,
it should >= 0.5, the pairing between instances of `y_pred` and `y` are identical.
If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
-maximal amout of unique pairing.
+maximal amount of unique pairing.
smooth_numerator: a small constant added to the numerator to avoid zero.

"""
@@ -171,10 +171,10 @@ def compute_panoptic_quality(
gt: ground truth. It must have the same shape as `pred` and have integer type.
metric_name: output metric. The value can be "pq", "sq" or "rq".
remap: whether to remap `pred` and `gt` to ensure contiguous ordering of instance id.
-match_iou_threshold: IOU threshould to determine the pairing between `pred` and `gt`. Usually,
+match_iou_threshold: IOU threshold to determine the pairing between `pred` and `gt`. Usually,
it should >= 0.5, the pairing between instances of `pred` and `gt` are identical.
If set `match_iou_threshold` < 0.5, this function uses Munkres assignment to find the
-maximal amout of unique pairing.
+maximal amount of unique pairing.
smooth_numerator: a small constant added to the numerator to avoid zero.

Raises:
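For thresholds below 0.5, pairings are no longer guaranteed unique, which is where the Munkres (Hungarian) assignment comes in. A hedged sketch using `scipy.optimize.linear_sum_assignment` on an illustrative IoU matrix:

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# rows: ground-truth instances, cols: predicted instances (illustrative values)
iou = np.array([[0.40, 0.35],
                [0.30, 0.45]])
match_iou_threshold = 0.3  # an assumption, below 0.5

gt_idx, pred_idx = linear_sum_assignment(-iou)  # negate to maximize total IoU
pairs = [(g, p) for g, p in zip(gt_idx, pred_idx) if iou[g, p] > match_iou_threshold]
# pairs == [(0, 0), (1, 1)]: the maximal amount of unique pairings
```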
2 changes: 1 addition & 1 deletion monai/metrics/utils.py
@@ -232,7 +232,7 @@ def remap_instance_id(pred: torch.Tensor, by_size: bool = False):
Args:
pred: segmentation predictions in the form of torch tensor. Each
value of the tensor should be an integer, and represents the prediction of its corresponding instance id.
-by_size: if True, larget instance will be assigned a smaller id.
+by_size: if True, largest instance will be assigned a smaller id.

"""
pred_id = list(pred.unique())