diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCELoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCELoss.md index 3afcbc019a5..45050b1d61e 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCELoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCELoss.md @@ -25,65 +25,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 表示应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.BCELoss(size_average=True) +torch.nn.BCELoss(weight=w, size_average=False, reduce=True) +torch.nn.BCELoss(weight=w, size_average=False) # Paddle 写法 -paddle.nn.BCELoss(reduction='mean') +paddle.nn.BCELoss(weight=w, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.BCELoss(size_average=False) +torch.nn.BCELoss(weight=w, size_average=True, reduce=True) +torch.nn.BCELoss(weight=w, reduce=True) +torch.nn.BCELoss(weight=w, size_average=True) +torch.nn.BCELoss(weight=w) # Paddle 写法 -paddle.nn.BCELoss(reduction='sum') +paddle.nn.BCELoss(weight=w, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.BCELoss(reduce=True) -# Paddle 写法 -paddle.nn.BCELoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.BCELoss(reduce=False) - -# Paddle 写法 -paddle.nn.BCELoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.BCELoss(reduction='none') - -# Paddle 写法 -paddle.nn.BCELoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.BCELoss(reduction='mean') - -# Paddle 写法 -paddle.nn.BCELoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 
-torch.nn.BCELoss(reduction='sum') +torch.nn.BCELoss(weight=w, size_average=True, reduce=False) +torch.nn.BCELoss(weight=w, size_average=False, reduce=False) +torch.nn.BCELoss(weight=w, reduce=False) # Paddle 写法 -paddle.nn.BCELoss(reduction='sum') +paddle.nn.BCELoss(weight=w, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCEWithLogitsLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCEWithLogitsLoss.md index 12ff093d8a3..e1679104d64 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCEWithLogitsLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.BCEWithLogitsLoss.md @@ -28,65 +28,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | pos_weight | pos_weight | 表示正类的权重。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.BCEWithLogitsLoss(size_average=True) +torch.nn.BCEWithLogitsLoss(weight=w, size_average=False, reduce=True) +torch.nn.BCEWithLogitsLoss(weight=w, size_average=False) # Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='mean') +paddle.nn.BCEWithLogitsLoss(weight=w, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.BCEWithLogitsLoss(size_average=False) +torch.nn.BCEWithLogitsLoss(weight=w, size_average=True, reduce=True) +torch.nn.BCEWithLogitsLoss(weight=w, reduce=True) +torch.nn.BCEWithLogitsLoss(weight=w, size_average=True) +torch.nn.BCEWithLogitsLoss(weight=w) # Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='sum') +paddle.nn.BCEWithLogitsLoss(weight=w, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.BCEWithLogitsLoss(reduce=True) -# Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 
-torch.nn.BCEWithLogitsLoss(reduce=False) - -# Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.BCEWithLogitsLoss(reduction='none') - -# Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.BCEWithLogitsLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.BCEWithLogitsLoss(reduction='sum') +torch.nn.BCEWithLogitsLoss(weight=w, size_average=True, reduce=False) +torch.nn.BCEWithLogitsLoss(weight=w, size_average=False, reduce=False) +torch.nn.BCEWithLogitsLoss(weight=w, reduce=False) # Paddle 写法 -paddle.nn.BCEWithLogitsLoss(reduction='sum') +paddle.nn.BCEWithLogitsLoss(weight=w, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CosineEmbeddingLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CosineEmbeddingLoss.md index e8da5b11fcc..9956eff1510 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CosineEmbeddingLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CosineEmbeddingLoss.md @@ -21,65 +21,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 指定应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.CosineEmbeddingLoss(size_average=True) +torch.nn.CosineEmbeddingLoss(margin=m, size_average=False, reduce=True) +torch.nn.CosineEmbeddingLoss(margin=m, size_average=False) # Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='mean') +paddle.nn.CosineEmbeddingLoss(margin=m, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # 
PyTorch 写法 -torch.nn.CosineEmbeddingLoss(size_average=False) +torch.nn.CosineEmbeddingLoss(margin=m, size_average=True, reduce=True) +torch.nn.CosineEmbeddingLoss(margin=m, reduce=True) +torch.nn.CosineEmbeddingLoss(margin=m, size_average=True) +torch.nn.CosineEmbeddingLoss(margin=m) # Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='sum') +paddle.nn.CosineEmbeddingLoss(margin=m, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.CosineEmbeddingLoss(reduce=True) -# Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.CosineEmbeddingLoss(reduce=False) - -# Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.CosineEmbeddingLoss(reduction='none') - -# Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.CosineEmbeddingLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.CosineEmbeddingLoss(reduction='sum') +torch.nn.CosineEmbeddingLoss(margin=m, size_average=True, reduce=False) +torch.nn.CosineEmbeddingLoss(margin=m, size_average=False, reduce=False) +torch.nn.CosineEmbeddingLoss(margin=m, reduce=False) # Paddle 写法 -paddle.nn.CosineEmbeddingLoss(reduction='sum') +paddle.nn.CosineEmbeddingLoss(margin=m, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CrossEntropyLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CrossEntropyLoss.md index ce5abfce407..a6267b51562 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CrossEntropyLoss.md +++ 
b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.CrossEntropyLoss.md @@ -36,65 +36,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | - | axis | 进行 softmax 计算的维度索引,PyTorch 无此参数,Paddle 保持默认即可。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.CrossEntropyLoss(size_average=True) +torch.nn.CrossEntropyLoss(weight=w, size_average=False, reduce=True) +torch.nn.CrossEntropyLoss(weight=w, size_average=False) # Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='mean') +paddle.nn.CrossEntropyLoss(weight=w, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.CrossEntropyLoss(size_average=False) +torch.nn.CrossEntropyLoss(weight=w, size_average=True, reduce=True) +torch.nn.CrossEntropyLoss(weight=w, reduce=True) +torch.nn.CrossEntropyLoss(weight=w, size_average=True) +torch.nn.CrossEntropyLoss(weight=w) # Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='sum') +paddle.nn.CrossEntropyLoss(weight=w, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.CrossEntropyLoss(reduce=True) -# Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.CrossEntropyLoss(reduce=False) - -# Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.CrossEntropyLoss(reduction='none') - -# Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.CrossEntropyLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.CrossEntropyLoss(reduction='sum') +torch.nn.CrossEntropyLoss(weight=w, size_average=True, reduce=False) +torch.nn.CrossEntropyLoss(weight=w, size_average=False, 
reduce=False) +torch.nn.CrossEntropyLoss(weight=w, reduce=False) # Paddle 写法 -paddle.nn.CrossEntropyLoss(reduction='sum') +paddle.nn.CrossEntropyLoss(weight=w, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.HingeEmbeddingLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.HingeEmbeddingLoss.md index 55fe740b4c5..5ed9fa04b8f 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.HingeEmbeddingLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.HingeEmbeddingLoss.md @@ -26,65 +26,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 表示应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.HingeEmbeddingLoss(size_average=True) +torch.nn.HingeEmbeddingLoss(margin=m, size_average=False, reduce=True) +torch.nn.HingeEmbeddingLoss(margin=m, size_average=False) # Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='mean') +paddle.nn.HingeEmbeddingLoss(margin=m, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.HingeEmbeddingLoss(size_average=False) +torch.nn.HingeEmbeddingLoss(margin=m, size_average=True, reduce=True) +torch.nn.HingeEmbeddingLoss(margin=m, reduce=True) +torch.nn.HingeEmbeddingLoss(margin=m, size_average=True) +torch.nn.HingeEmbeddingLoss(margin=m) # Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='sum') +paddle.nn.HingeEmbeddingLoss(margin=m, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.HingeEmbeddingLoss(reduce=True) -# Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.HingeEmbeddingLoss(reduce=False) - -# Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='none') -``` 
-#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.HingeEmbeddingLoss(reduction='none') - -# Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.HingeEmbeddingLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.HingeEmbeddingLoss(reduction='sum') +torch.nn.HingeEmbeddingLoss(margin=m, size_average=True, reduce=False) +torch.nn.HingeEmbeddingLoss(margin=m, size_average=False, reduce=False) +torch.nn.HingeEmbeddingLoss(margin=m, reduce=False) # Paddle 写法 -paddle.nn.HingeEmbeddingLoss(reduction='sum') +paddle.nn.HingeEmbeddingLoss(margin=m, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.KLDivLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.KLDivLoss.md index f5aecab138b..fee4de43bcc 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.KLDivLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.KLDivLoss.md @@ -24,65 +24,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | log_target | log_target | 指定目标是否属于 log 空间。 | ### 转写示例 - - -#### size_average -size_average 为 True -```python -# PyTorch 写法 -torch.nn.KLDivLoss(size_average=True) - -# Paddle 写法 -paddle.nn.KLDivLoss(reduction='mean') -``` - -size_average 为 False +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 +torch.nn.KLDivLoss(size_average=False, reduce=True) torch.nn.KLDivLoss(size_average=False) # Paddle 写法 paddle.nn.KLDivLoss(reduction='sum') ``` -#### reduce -reduce 为 True + +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 +torch.nn.KLDivLoss(size_average=True, reduce=True) torch.nn.KLDivLoss(reduce=True) +torch.nn.KLDivLoss(size_average=True) 
+torch.nn.KLDivLoss() # Paddle 写法 -paddle.nn.KLDivLoss(reduction='sum') +paddle.nn.KLDivLoss(reduction='mean') ``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.KLDivLoss(reduce=False) -# Paddle 写法 -paddle.nn.KLDivLoss(reduction='none') -``` -#### reduction -reduction 为'none' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.KLDivLoss(reduction='none') +torch.nn.KLDivLoss(size_average=True, reduce=False) +torch.nn.KLDivLoss(size_average=False, reduce=False) +torch.nn.KLDivLoss(reduce=False) # Paddle 写法 paddle.nn.KLDivLoss(reduction='none') ``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.KLDivLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.KLDivLoss(reduction='mean') -``` -reduction 为'sum' -```python -# PyTorch 写法 -torch.nn.KLDivLoss(reduction='sum') - -# Paddle 写法 -paddle.nn.KLDivLoss(reduction='sum') -``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.L1Loss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.L1Loss.md index 39592b67eac..8890b0402d2 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.L1Loss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.L1Loss.md @@ -22,65 +22,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 表示对输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True -```python -# PyTorch 写法 -torch.nn.L1Loss(size_average=True) - -# Paddle 写法 -paddle.nn.L1Loss(reduction='mean') -``` - -size_average 为 False +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 +torch.nn.L1Loss(size_average=False, reduce=True) torch.nn.L1Loss(size_average=False) # Paddle 写法 paddle.nn.L1Loss(reduction='sum') ``` -#### reduce -reduce 为 True + +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 +torch.nn.L1Loss(size_average=True, reduce=True) torch.nn.L1Loss(reduce=True) 
+torch.nn.L1Loss(size_average=True) +torch.nn.L1Loss() # Paddle 写法 -paddle.nn.L1Loss(reduction='sum') +paddle.nn.L1Loss(reduction='mean') ``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.L1Loss(reduce=False) -# Paddle 写法 -paddle.nn.L1Loss(reduction='none') -``` -#### reduction -reduction 为'none' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.L1Loss(reduction='none') +torch.nn.L1Loss(size_average=True, reduce=False) +torch.nn.L1Loss(size_average=False, reduce=False) +torch.nn.L1Loss(reduce=False) # Paddle 写法 paddle.nn.L1Loss(reduction='none') ``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.L1Loss(reduction='mean') - -# Paddle 写法 -paddle.nn.L1Loss(reduction='mean') -``` -reduction 为'sum' -```python -# PyTorch 写法 -torch.nn.L1Loss(reduction='sum') - -# Paddle 写法 -paddle.nn.L1Loss(reduction='sum') -``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MSELoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MSELoss.md index 60ae87050e3..656266537c4 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MSELoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MSELoss.md @@ -21,65 +21,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 表示对输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True -```python -# PyTorch 写法 -torch.nn.MSELoss(size_average=True) - -# Paddle 写法 -paddle.nn.MSELoss(reduction='mean') -``` - -size_average 为 False +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 +torch.nn.MSELoss(size_average=False, reduce=True) torch.nn.MSELoss(size_average=False) # Paddle 写法 paddle.nn.MSELoss(reduction='sum') ``` -#### reduce -reduce 为 True + +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 +torch.nn.MSELoss(size_average=True, reduce=True) torch.nn.MSELoss(reduce=True) 
+torch.nn.MSELoss(size_average=True) +torch.nn.MSELoss() # Paddle 写法 -paddle.nn.MSELoss(reduction='sum') +paddle.nn.MSELoss(reduction='mean') ``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.MSELoss(reduce=False) -# Paddle 写法 -paddle.nn.MSELoss(reduction='none') -``` -#### reduction -reduction 为'none' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.MSELoss(reduction='none') +torch.nn.MSELoss(size_average=True, reduce=False) +torch.nn.MSELoss(size_average=False, reduce=False) +torch.nn.MSELoss(reduce=False) # Paddle 写法 paddle.nn.MSELoss(reduction='none') ``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.MSELoss(reduction='mean') - -# Paddle 写法 -paddle.nn.MSELoss(reduction='mean') -``` -reduction 为'sum' -```python -# PyTorch 写法 -torch.nn.MSELoss(reduction='sum') - -# Paddle 写法 -paddle.nn.MSELoss(reduction='sum') -``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MarginRankingLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MarginRankingLoss.md index b10971244cf..2fce7e9ac0b 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MarginRankingLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MarginRankingLoss.md @@ -26,65 +26,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 表示应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.MarginRankingLoss(size_average=True) +torch.nn.MarginRankingLoss(margin=m, size_average=False, reduce=True) +torch.nn.MarginRankingLoss(margin=m, size_average=False) # Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='mean') +paddle.nn.MarginRankingLoss(margin=m, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 
-torch.nn.MarginRankingLoss(size_average=False) +torch.nn.MarginRankingLoss(margin=m, size_average=True, reduce=True) +torch.nn.MarginRankingLoss(margin=m, reduce=True) +torch.nn.MarginRankingLoss(margin=m, size_average=True) +torch.nn.MarginRankingLoss(margin=m) # Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='sum') +paddle.nn.MarginRankingLoss(margin=m, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.MarginRankingLoss(reduce=True) -# Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.MarginRankingLoss(reduce=False) - -# Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.MarginRankingLoss(reduction='none') - -# Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.MarginRankingLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.MarginRankingLoss(reduction='sum') +torch.nn.MarginRankingLoss(margin=m, size_average=True, reduce=False) +torch.nn.MarginRankingLoss(margin=m, size_average=False, reduce=False) +torch.nn.MarginRankingLoss(margin=m, reduce=False) # Paddle 写法 -paddle.nn.MarginRankingLoss(reduction='sum') +paddle.nn.MarginRankingLoss(margin=m, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelMarginLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelMarginLoss.md index fc76c63fcca..bc89f4633fd 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelMarginLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelMarginLoss.md @@ -20,65 +20,35 @@ 
PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 指定应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True -```python -# PyTorch 写法 -torch.nn.MultiLabelMarginLoss(size_average=True) - -# Paddle 写法 -paddle.nn.MultiLabelMarginLoss(reduction='mean') -``` - -size_average 为 False +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 +torch.nn.MultiLabelMarginLoss(size_average=False, reduce=True) torch.nn.MultiLabelMarginLoss(size_average=False) # Paddle 写法 paddle.nn.MultiLabelMarginLoss(reduction='sum') ``` -#### reduce -reduce 为 True + +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 +torch.nn.MultiLabelMarginLoss(size_average=True, reduce=True) torch.nn.MultiLabelMarginLoss(reduce=True) +torch.nn.MultiLabelMarginLoss(size_average=True) +torch.nn.MultiLabelMarginLoss() # Paddle 写法 -paddle.nn.MultiLabelMarginLoss(reduction='sum') +paddle.nn.MultiLabelMarginLoss(reduction='mean') ``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.MultiLabelMarginLoss(reduce=False) -# Paddle 写法 -paddle.nn.MultiLabelMarginLoss(reduction='none') -``` -#### reduction -reduction 为'none' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.MultiLabelMarginLoss(reduction='none') +torch.nn.MultiLabelMarginLoss(size_average=True, reduce=False) +torch.nn.MultiLabelMarginLoss(size_average=False, reduce=False) +torch.nn.MultiLabelMarginLoss(reduce=False) # Paddle 写法 paddle.nn.MultiLabelMarginLoss(reduction='none') ``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.MultiLabelMarginLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.MultiLabelMarginLoss(reduction='mean') -``` -reduction 为'sum' -```python -# PyTorch 写法 -torch.nn.MultiLabelMarginLoss(reduction='sum') - -# Paddle 写法 -paddle.nn.MultiLabelMarginLoss(reduction='sum') -``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelSoftMarginLoss.md 
b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelSoftMarginLoss.md index a96567650fa..62f387961f5 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelSoftMarginLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiLabelSoftMarginLoss.md @@ -21,65 +21,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 指定应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(size_average=True) +torch.nn.MultiLabelSoftMarginLoss(weight=w, size_average=False, reduce=True) +torch.nn.MultiLabelSoftMarginLoss(weight=w, size_average=False) # Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='mean') +paddle.nn.MultiLabelSoftMarginLoss(weight=w, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(size_average=False) +torch.nn.MultiLabelSoftMarginLoss(weight=w, size_average=True, reduce=True) +torch.nn.MultiLabelSoftMarginLoss(weight=w, reduce=True) +torch.nn.MultiLabelSoftMarginLoss(weight=w, size_average=True) +torch.nn.MultiLabelSoftMarginLoss(weight=w) # Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='sum') +paddle.nn.MultiLabelSoftMarginLoss(weight=w, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(reduce=True) -# Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(reduce=False) - -# Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(reduction='none') - -# Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='none') -``` 
-reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.MultiLabelSoftMarginLoss(reduction='sum') +torch.nn.MultiLabelSoftMarginLoss(weight=w, size_average=True, reduce=False) +torch.nn.MultiLabelSoftMarginLoss(weight=w, size_average=False, reduce=False) +torch.nn.MultiLabelSoftMarginLoss(weight=w, reduce=False) # Paddle 写法 -paddle.nn.MultiLabelSoftMarginLoss(reduction='sum') +paddle.nn.MultiLabelSoftMarginLoss(weight=w, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiMarginLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiMarginLoss.md index 05179773cf6..fbf1da3bba3 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiMarginLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.MultiMarginLoss.md @@ -23,65 +23,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 指定应用于输出结果的计算方式,可选值有 `none`、`mean` 和 `sum`。默认为 `mean`,计算 mini-batch loss 均值。设置为 `sum` 时,计算 mini-batch loss 的总和。设置为 `none` 时,则返回 loss Tensor。默认值下为 `mean`。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.MultiMarginLoss(size_average=True) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, size_average=False, reduce=True) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, size_average=False) # Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='mean') +paddle.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.MultiMarginLoss(size_average=False) 
+torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, size_average=True, reduce=True) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, reduce=True) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, size_average=True) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w) # Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='sum') +paddle.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.MultiMarginLoss(reduce=True) -# Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.MultiMarginLoss(reduce=False) - -# Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.MultiMarginLoss(reduction='none') - -# Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.MultiMarginLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.MultiMarginLoss(reduction='sum') +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, size_average=True, reduce=False) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, size_average=False, reduce=False) +torch.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, reduce=False) # Paddle 写法 -paddle.nn.MultiMarginLoss(reduction='sum') +paddle.nn.MultiMarginLoss(p=1, margin=1.0, weight=w, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.NLLLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.NLLLoss.md index 8a1904b4d65..b1c3993c1e9 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.NLLLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.NLLLoss.md @@ -29,65 
+29,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 表示应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.NLLLoss(size_average=True) +torch.nn.NLLLoss(weight=w, ignore_index=-100, size_average=False, reduce=True) +torch.nn.NLLLoss(weight=w, ignore_index=-100, size_average=False) # Paddle 写法 -paddle.nn.NLLLoss(reduction='mean') +paddle.nn.NLLLoss(weight=w, ignore_index=-100, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.NLLLoss(size_average=False) +torch.nn.NLLLoss(weight=w, ignore_index=-100, size_average=True, reduce=True) +torch.nn.NLLLoss(weight=w, ignore_index=-100, reduce=True) +torch.nn.NLLLoss(weight=w, ignore_index=-100, size_average=True) +torch.nn.NLLLoss(weight=w, ignore_index=-100) # Paddle 写法 -paddle.nn.NLLLoss(reduction='sum') +paddle.nn.NLLLoss(weight=w, ignore_index=-100, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.NLLLoss(reduce=True) -# Paddle 写法 -paddle.nn.NLLLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.NLLLoss(reduce=False) - -# Paddle 写法 -paddle.nn.NLLLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.NLLLoss(reduction='none') - -# Paddle 写法 -paddle.nn.NLLLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.NLLLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.NLLLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.NLLLoss(reduction='sum') +torch.nn.NLLLoss(weight=w, ignore_index=-100, size_average=True, reduce=False) +torch.nn.NLLLoss(weight=w, ignore_index=-100, size_average=False, reduce=False) +torch.nn.NLLLoss(weight=w, ignore_index=-100, reduce=False) # Paddle 写法 -paddle.nn.NLLLoss(reduction='sum') +paddle.nn.NLLLoss(weight=w, 
ignore_index=-100, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.PoissonNLLLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.PoissonNLLLoss.md index f7161f5e8b2..04e33c4342c 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.PoissonNLLLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.PoissonNLLLoss.md @@ -23,65 +23,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 指定应用于输出结果的计算方式,可选值有 `none`、`mean` 和 `sum`。默认为 `mean`,计算 mini-batch loss 均值。设置为 `sum` 时,计算 mini-batch loss 的总和。设置为 `none` 时,则返回 loss Tensor。默认值下为 `mean`。两者完全一致。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.PoissonNLLLoss(size_average=True) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, size_average=False, reduce=True) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, size_average=False) # Paddle 写法 -paddle.nn.PoissonNLLLoss(reduction='mean') +paddle.nn.PoissonNLLLoss(log_input=True, full=False, epsilon=1e-8, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.PoissonNLLLoss(size_average=False) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, size_average=True, reduce=True) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, reduce=True) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, size_average=True) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8) # Paddle 写法 -paddle.nn.PoissonNLLLoss(reduction='sum') +paddle.nn.PoissonNLLLoss(log_input=True, full=False, epsilon=1e-8, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.PoissonNLLLoss(reduce=True) -# Paddle 写法 
-paddle.nn.PoissonNLLLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.PoissonNLLLoss(reduce=False) - -# Paddle 写法 -paddle.nn.PoissonNLLLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.PoissonNLLLoss(reduction='none') - -# Paddle 写法 -paddle.nn.PoissonNLLLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.PoissonNLLLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.PoissonNLLLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.PoissonNLLLoss(reduction='sum') +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, size_average=True, reduce=False) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, size_average=False, reduce=False) +torch.nn.PoissonNLLLoss(log_input=True, full=False, eps=1e-8, reduce=False) # Paddle 写法 -paddle.nn.PoissonNLLLoss(reduction='sum') +paddle.nn.PoissonNLLLoss(log_input=True, full=False, epsilon=1e-8, reduction='none') ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SmoothL1Loss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SmoothL1Loss.md index 596a6ca811d..83cbbc27047 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SmoothL1Loss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SmoothL1Loss.md @@ -27,65 +27,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | - | is_huber | 控制 huber_loss 与 smooth_l1_loss 的开关,Paddle 需设置为 False 。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.SmoothL1Loss(size_average=True) +torch.nn.SmoothL1Loss(beta=1.0, size_average=False, reduce=True) +torch.nn.SmoothL1Loss(beta=1.0, size_average=False) # Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='mean') 
+paddle.nn.SmoothL1Loss(reduction='sum', is_huber=False) ``` -size_average 为 False -```python -# PyTorch 写法 -torch.nn.SmoothL1Loss(size_average=False) - -# Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='sum') -``` -#### reduce -reduce 为 True +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 +torch.nn.SmoothL1Loss(size_average=True, reduce=True) torch.nn.SmoothL1Loss(reduce=True) +torch.nn.SmoothL1Loss(size_average=True) +torch.nn.SmoothL1Loss() # Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.SmoothL1Loss(reduce=False) - -# Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.SmoothL1Loss(reduction='none') - -# Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='none') +paddle.nn.SmoothL1Loss(reduction='mean', is_huber=False) ``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.SmoothL1Loss(reduction='mean') -# Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.SmoothL1Loss(reduction='sum') +torch.nn.SmoothL1Loss(size_average=True, reduce=False) +torch.nn.SmoothL1Loss(size_average=False, reduce=False) +torch.nn.SmoothL1Loss(reduce=False) # Paddle 写法 -paddle.nn.SmoothL1Loss(reduction='none') +paddle.nn.SmoothL1Loss(reduction='none', is_huber=False) ``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SoftMarginLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SoftMarginLoss.md index 5aa71ee2502..e4e7d902d85 100644 --- a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SoftMarginLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.SoftMarginLoss.md @@ -23,65 +23,35 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 
表示应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True -```python -# PyTorch 写法 -torch.nn.SoftMarginLoss(size_average=True) - -# Paddle 写法 -paddle.nn.SoftMarginLoss(reduction='mean') -``` - -size_average 为 False +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 +torch.nn.SoftMarginLoss(size_average=False, reduce=True) torch.nn.SoftMarginLoss(size_average=False) # Paddle 写法 paddle.nn.SoftMarginLoss(reduction='sum') ``` -#### reduce -reduce 为 True + +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 +torch.nn.SoftMarginLoss(size_average=True, reduce=True) torch.nn.SoftMarginLoss(reduce=True) +torch.nn.SoftMarginLoss(size_average=True) +torch.nn.SoftMarginLoss() # Paddle 写法 -paddle.nn.SoftMarginLoss(reduction='sum') +paddle.nn.SoftMarginLoss(reduction='mean') ``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.SoftMarginLoss(reduce=False) -# Paddle 写法 -paddle.nn.SoftMarginLoss(reduction='none') -``` -#### reduction -reduction 为'none' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 -torch.nn.SoftMarginLoss(reduction='none') +torch.nn.SoftMarginLoss(size_average=True, reduce=False) +torch.nn.SoftMarginLoss(size_average=False, reduce=False) +torch.nn.SoftMarginLoss(reduce=False) # Paddle 写法 paddle.nn.SoftMarginLoss(reduction='none') ``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.SoftMarginLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.SoftMarginLoss(reduction='mean') -``` -reduction 为'sum' -```python -# PyTorch 写法 -torch.nn.SoftMarginLoss(reduction='sum') - -# Paddle 写法 -paddle.nn.SoftMarginLoss(reduction='sum') -``` diff --git a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.TripletMarginLoss.md b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.TripletMarginLoss.md index 5042f565e3e..19e3a2c90d1 100644 --- 
a/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.TripletMarginLoss.md +++ b/docs/guides/model_convert/convert_from_pytorch/api_difference/torch_more_args/torch.nn.TripletMarginLoss.md @@ -24,65 +24,38 @@ PyTorch 相比 Paddle 支持更多其他参数,具体如下: | reduction | reduction | 指定应用于输出结果的计算方式。 | ### 转写示例 - - -#### size_average -size_average 为 True +#### size_average/reduce:对应到 reduction 为 sum ```python # PyTorch 写法 -torch.nn.TripletMarginLoss(size_average=True) +torch.nn.TripletMarginLoss(margin=m, size_average=False, reduce=True) +torch.nn.TripletMarginLoss(margin=m, size_average=False) # Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='mean') +## 以上写法都统一对应到如下写法 +paddle.nn.TripletMarginLoss(margin=m, reduction='sum') ``` -size_average 为 False +#### size_average/reduce:对应到 reduction 为 mean ```python # PyTorch 写法 -torch.nn.TripletMarginLoss(size_average=False) +torch.nn.TripletMarginLoss(margin=m, size_average=True, reduce=True) +torch.nn.TripletMarginLoss(margin=m, reduce=True) +torch.nn.TripletMarginLoss(margin=m, size_average=True) +torch.nn.TripletMarginLoss(margin=m) # Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='sum') +## 以上写法都统一对应到如下写法 +paddle.nn.TripletMarginLoss(margin=m, reduction='mean') ``` -#### reduce -reduce 为 True -```python -# PyTorch 写法 -torch.nn.TripletMarginLoss(reduce=True) -# Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='sum') -``` -reduce 为 False -```python -# PyTorch 写法 -torch.nn.TripletMarginLoss(reduce=False) - -# Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='none') -``` -#### reduction -reduction 为'none' -```python -# PyTorch 写法 -torch.nn.TripletMarginLoss(reduction='none') - -# Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='none') -``` -reduction 为'mean' -```python -# PyTorch 写法 -torch.nn.TripletMarginLoss(reduction='mean') - -# Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='mean') -``` -reduction 为'sum' +#### size_average/reduce:对应到 reduction 为 none ```python # PyTorch 写法 
-torch.nn.TripletMarginLoss(reduction='sum') +torch.nn.TripletMarginLoss(margin=m, size_average=True, reduce=False) +torch.nn.TripletMarginLoss(margin=m, size_average=False, reduce=False) +torch.nn.TripletMarginLoss(margin=m, reduce=False) # Paddle 写法 -paddle.nn.TripletMarginLoss(reduction='sum') +## 以上写法都统一对应到如下写法 +paddle.nn.TripletMarginLoss(margin=m, reduction='none') ```