
Merge pull request open-mmlab#197 from open-mmlab/zz/typo
Typos
innerlee committed Feb 9, 2021
2 parents f0c38b8 + 1a2ab70 commit fe47776
Showing 12 changed files with 20 additions and 20 deletions.
2 changes: 1 addition & 1 deletion mmedit/core/hooks/visualization.py
@@ -24,7 +24,7 @@ class VisualizationHook(Hook):
filename_tmpl (str): Format string used to save images. The output file
name will be formatted as this args. Default: 'iter_{}.png'.
rerange (bool): Whether to rerange the output value from [-1, 1] to
-[0, 1]. We highly recommand users should preprocess the
+[0, 1]. We highly recommend users should preprocess the
visualization results on their own. Here, we just provide a simple
interface. Default: True.
bgr2rgb (bool): Whether to reformat the channel dimension from BGR to
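As a rough illustration of the `rerange` and `bgr2rgb` options documented above (a minimal sketch, not the hook's actual implementation):

import torch

def postprocess(img, rerange=True, bgr2rgb=True):
    # img: (N, C, H, W) tensor assumed to lie in [-1, 1]
    if rerange:
        img = (img + 1) / 2            # map [-1, 1] -> [0, 1]
    if bgr2rgb:
        img = img[:, [2, 1, 0], :, :]  # swap the channel order
    return img.clamp(0, 1)

vis = postprocess(torch.randn(2, 3, 64, 64))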
@@ -53,7 +53,7 @@ def forward(self, x, shortcut, dec_idx_feat=None):
Args:
x (Tensor): Input feature map with shape (N, C, H, W).
-shortcut (Tensor): The shorcut connection with shape
+shortcut (Tensor): The shortcut connection with shape
(N, C, H', W').
dec_idx_feat (Tensor, optional): The decode index feature map with
shape (N, C, H', W'). Defaults to None.
@@ -117,7 +117,7 @@ def init_weights(self):
normal_init(m, mean=0, std=std)

def forward(self, inputs):
-"""Forward fucntion.
+"""Forward function.
Args:
inputs (dict): Output dict of IndexNetEncoder.
@@ -10,12 +10,12 @@
class PConvDecoder(nn.Module):
"""Decoder with partial conv.
-About the details for this archetecture, pls see:
+About the details for this architecture, pls see:
Image Inpainting for Irregular Holes Using Partial Convolutions
Args:
num_layers (int): The number of convolutional layers. Default: 7.
-interpolation (str): The upsamle mode. Default: 'nearest'.
+interpolation (str): The upsample mode. Default: 'nearest'.
conv_cfg (dict): Config for convolution module. Default:
{'type': 'PConv', 'multi_channel': True}.
norm_cfg (dict): Config for norm layer. Default:
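For context, the arguments documented above would be passed along these lines (hypothetical usage built only from the docstring; the import path is an assumption and omitted defaults are left untouched):

from mmedit.models.backbones import PConvDecoder  # import path assumed

decoder = PConvDecoder(
    num_layers=7,                                     # number of conv layers
    interpolation='nearest',                          # upsample mode
    conv_cfg=dict(type='PConv', multi_channel=True))  # partial conv config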
@@ -9,7 +9,7 @@
class PConvEncoder(nn.Module):
"""Encoder with partial conv.
-About the details for this archetecture, pls see:
+About the details for this architecture, pls see:
Image Inpainting for Irregular Holes Using Partial Convolutions
Args:
2 changes: 1 addition & 1 deletion mmedit/models/common/aspp.py
@@ -114,7 +114,7 @@ def forward(self, x):
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
-Tensor: Outpur tensor.
+Tensor: Output tensor.
"""
res = []
for conv in self.convs:
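The truncated loop above applies every dilated branch to the same input; a typical ASPP forward then concatenates the branch outputs along the channel axis (sketch only, not necessarily the module's exact code):

import torch

def aspp_forward(convs, x):
    # run each parallel (dilated) conv branch on the same feature map
    res = [conv(x) for conv in convs]
    # fuse the branches by concatenating along the channel dimension
    return torch.cat(res, dim=1)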
2 changes: 1 addition & 1 deletion mmedit/models/common/separable_conv_module.py
@@ -89,7 +89,7 @@ def forward(self, x):
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
-Tensor: Outpur tensor.
+Tensor: Output tensor.
"""
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
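The two calls above are the whole idea of a depthwise separable convolution. A self-contained plain-PyTorch sketch of the same structure (illustrative, not the module itself):

import torch.nn as nn

class SeparableConvSketch(nn.Module):
    def __init__(self, in_ch, out_ch, kernel_size=3, padding=1):
        super().__init__()
        # depthwise: one filter per input channel (groups=in_ch)
        self.depthwise_conv = nn.Conv2d(
            in_ch, in_ch, kernel_size, padding=padding, groups=in_ch)
        # pointwise: 1x1 conv that mixes channels
        self.pointwise_conv = nn.Conv2d(in_ch, out_ch, 1)

    def forward(self, x):
        x = self.depthwise_conv(x)
        x = self.pointwise_conv(x)
        return x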
@@ -15,7 +15,7 @@ class MultiLayerDiscriminator(nn.Module):
Args:
in_channels (int): Input channel of the first input convolution.
-max_channels (int): The maxinum channel number in this structure.
+max_channels (int): The maximum channel number in this structure.
num_conv (int): Number of stacked intermediate convs (including input
conv but excluding output conv).
fc_in_channels (int | None): Input dimension of the fully connected
2 changes: 1 addition & 1 deletion mmedit/models/components/refiners/deepfill_refiner.py
@@ -62,7 +62,7 @@ def forward(self, x, mask):
# contextual attention branch
attention_x = self.encoder_attention(x)['out']
h_x, w_x = attention_x.shape[-2:]
-# resale mask to a samller size
+# resale mask to a smaller size
resized_mask = F.interpolate(mask, size=(h_x, w_x))
attention_x, offset = self.contextual_attention_neck(
attention_x, resized_mask)
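As a standalone version of the resize in the hunk above (shapes are made up for illustration):

import torch
import torch.nn.functional as F

mask = torch.zeros(1, 1, 256, 256)   # hole mask at the input resolution
h_x, w_x = 64, 64                    # spatial size of the attention features
resized_mask = F.interpolate(mask, size=(h_x, w_x))  # default mode is 'nearest'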
14 changes: 7 additions & 7 deletions mmedit/models/losses/perceptual_loss.py
@@ -19,7 +19,7 @@ class PerceptualVGG(nn.Module):
forward function will return the corresponding features. This
list contains the name each layer in `vgg.feature`. An example
of this list is ['4', '10'].
-vgg_tyep (str): Set the type of vgg network. Default: 'vgg19'.
+vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
use_input_norm (bool): If True, normalize the input image.
Importantly, the input feature must in the range [0, 1].
Default: True.
@@ -58,7 +58,7 @@ def __init__(self,
torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

for v in self.vgg_layers.parameters():
-v.requies_grad = False
+v.requires_grad = False

def forward(self, x):
"""Forward function.
@@ -105,14 +105,14 @@ class PerceptualLoss(nn.Module):
use_input_norm (bool): If True, normalize the input image in vgg.
Default: True.
perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
-loss will be calculated and the loss will multiplified by the
+loss will be calculated and the loss will multiplied by the
weight. Default: 1.0.
-style_weight (flaot): If `style_weight > 0`, the style loss will be
-calculated and the loss will multiplified by the weight.
+style_weight (float): If `style_weight > 0`, the style loss will be
+calculated and the loss will multiplied by the weight.
Default: 1.0.
norm_img (bool): If True, the image will be normed to [0, 1]. Note that
this is different from the `use_input_norm` which norm the input in
-in forward fucntion of vgg according to the statistics of dataset.
+in forward function of vgg according to the statistics of dataset.
Importantly, the input image must be in range [-1, 1].
pretrained (str): Path for pretrained weights. Default:
'torchvision://vgg19'
@@ -163,7 +163,7 @@ def forward(self, x, gt):
x_features = self.vgg(x)
gt_features = self.vgg(gt.detach())

-# calculate preceptual loss
+# calculate perceptual loss
if self.perceptual_weight > 0:
percep_loss = 0
for k in x_features.keys():
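Tying the hunks above together, the frozen feature extractor and the `perceptual_weight` scaling amount to roughly the following (a condensed sketch, not the class's actual code; the L1 criterion is an assumption):

import torch.nn as nn

def weighted_perceptual_loss(x_features, gt_features, perceptual_weight=1.0):
    criterion = nn.L1Loss()
    # sum per-layer distances between prediction and ground-truth features,
    # then scale by the configured weight
    percep_loss = sum(
        criterion(x_features[k], gt_features[k]) for k in x_features)
    return perceptual_weight * percep_loss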
2 changes: 1 addition & 1 deletion mmedit/utils/logger.py
@@ -9,7 +9,7 @@ def get_root_logger(log_file=None, log_level=logging.INFO):
The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
-e.g., "mmsr".
+e.g., "mmedit".
Args:
log_file (str | None): The log filename. If specified, a FileHandler
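A typical call matching the docstring above (the file name is made up; the import path is assumed):

import logging
from mmedit.utils import get_root_logger  # import path assumed

logger = get_root_logger(log_file='work_dirs/example.log',
                         log_level=logging.INFO)
logger.info('messages now go to both the stream and the file handler')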
2 changes: 1 addition & 1 deletion tests/test_crop.py
@@ -315,7 +315,7 @@ def test_crop_around_fg(self):
# keys must contain 'seg'
CropAroundFg(['fg', 'bg'])
with pytest.raises(TypeError):
-# bd_ratio_range must be a tuple of 2 flaot
+# bd_ratio_range must be a tuple of 2 float
CropAroundFg(['seg', 'merged'], bd_ratio_range=0.1)

keys = ['bg', 'merged', 'seg']
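For contrast with the failing case above, a valid construction passes a 2-tuple of floats (values chosen arbitrarily for illustration):

crop = CropAroundFg(['seg', 'merged'], bd_ratio_range=(0.1, 0.4))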
2 changes: 1 addition & 1 deletion tools/evaluate_comp1k.py
@@ -35,7 +35,7 @@ def evaluate_one(args):
def evaluate(pred_root, gt_root, trimap_root, verbose, nproc):
"""Evaluate test results of Adobe composition-1k dataset.
-There are 50 different ground truth foregrounds and alpha mattes paire,
+There are 50 different ground truth foregrounds and alpha mattes pairs,
each of the foreground will be composited with 20 different backgrounds,
producing 1000 images for testing. In some repo, the ground truth alpha
matte will be copied 20 times and named the same as the images. This
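Spelling out the arithmetic in the docstring above (purely illustrative):

num_foregrounds = 50   # ground-truth foreground / alpha matte pairs
num_backgrounds = 20   # backgrounds each foreground is composited with
assert num_foregrounds * num_backgrounds == 1000  # test images in total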
