Skip to content

Commit

Permalink
fix: links in docstrings (#859)
Browse files Browse the repository at this point in the history
  • Loading branch information
bagxi committed Jun 25, 2020
1 parent f5134f9 commit c3fcb48
Show file tree
Hide file tree
Showing 6 changed files with 28 additions and 16 deletions.
5 changes: 3 additions & 2 deletions catalyst/contrib/data/transforms.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
"""
This subpackage was borrowed from
torchvision(https://github.com/pytorch/vision).
This subpackage was borrowed from `torchvision`__.
__ https://github.com/pytorch/vision
"""

import numpy as np
Expand Down
8 changes: 5 additions & 3 deletions catalyst/contrib/nn/criterion/circle.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,8 @@ def _convert_label_to_similarity(

class CircleLoss(nn.Module):
"""
CircleLoss from
"Circle Loss: A Unified Perspective of Pair Similarity Optimization"
https://arxiv.org/abs/2002.10857
CircleLoss from `Circle Loss: A Unified Perspective
of Pair Similarity Optimization`_ paper.
Adapted from:
https://github.com/TinyZeaMays/CircleLoss
Expand All @@ -41,6 +40,9 @@ class CircleLoss(nn.Module):
>>> labels = torch.randint(high=10, size=(256,))
>>> criterion = CircleLoss(margin=0.25, gamma=256)
>>> criterion(features, labels)
.. _`Circle Loss: A Unified Perspective of Pair Similarity Optimization`:
https://arxiv.org/abs/2002.10857
"""

def __init__(self, margin: float, gamma: float) -> None:
Expand Down
3 changes: 1 addition & 2 deletions catalyst/contrib/nn/criterion/triplet.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,7 @@
class TripletLoss(nn.Module):
"""Triplet loss with hard positive/negative mining.
Reference:
Code imported from https://github.com/NegatioN/OnlineMiningTripletLoss
Adapted from: https://github.com/NegatioN/OnlineMiningTripletLoss
"""

def __init__(self, margin: float = 0.3):
Expand Down
16 changes: 11 additions & 5 deletions catalyst/contrib/nn/modules/se.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
class cSE(nn.Module): # noqa: N801
"""
The channel-wise SE (Squeeze and Excitation) block from the
[Squeeze-and-Excitation Networks](https://arxiv.org/abs/1709.01507) paper.
`Squeeze-and-Excitation Networks`__ paper.
Adapted from
https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/65939
Expand All @@ -17,6 +17,8 @@ class cSE(nn.Module): # noqa: N801
- Input: (batch, channels, height, width)
- Output: (batch, channels, height, width) (same shape as input)
__ https://arxiv.org/abs/1709.01507
"""

def __init__(self, in_channels: int, r: int = 16):
Expand Down Expand Up @@ -48,8 +50,8 @@ def forward(self, x: torch.Tensor):
class sSE(nn.Module): # noqa: N801
"""
The sSE (Channel Squeeze and Spatial Excitation) block from the
[Concurrent Spatial and Channel ‘Squeeze & Excitation’
in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579) paper.
`Concurrent Spatial and Channel ‘Squeeze & Excitation’
in Fully Convolutional Networks`__ paper.
Adapted from
https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178
Expand All @@ -58,6 +60,8 @@ class sSE(nn.Module): # noqa: N801
- Input: (batch, channels, height, width)
- Output: (batch, channels, height, width) (same shape as input)
__ https://arxiv.org/abs/1803.02579
"""

def __init__(self, in_channels: int):
Expand All @@ -83,8 +87,8 @@ def forward(self, x: torch.Tensor):
class scSE(nn.Module): # noqa: N801
"""
The scSE (Concurrent Spatial and Channel Squeeze and Channel Excitation)
block from the [Concurrent Spatial and Channel ‘Squeeze & Excitation’
in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579) paper.
block from the `Concurrent Spatial and Channel ‘Squeeze & Excitation’
in Fully Convolutional Networks`__ paper.
Adapted from
https://www.kaggle.com/c/tgs-salt-identification-challenge/discussion/66178
Expand All @@ -93,6 +97,8 @@ class scSE(nn.Module): # noqa: N801
- Input: (batch, channels, height, width)
- Output: (batch, channels, height, width) (same shape as input)
__ https://arxiv.org/abs/1803.02579
"""

def __init__(self, in_channels: int, r: int = 16):
Expand Down
6 changes: 4 additions & 2 deletions catalyst/data/sampler.py
Original file line number Diff line number Diff line change
Expand Up @@ -292,8 +292,7 @@ class DynamicLenBatchSampler(BatchSampler):
A dynamic batch length data sampler.
Should be used with `catalyst.utils.trim_tensors`.
Adapted from "Dynamic minibatch trimming to improve BERT training speed"
https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/94779
Adapted from `Dynamic minibatch trimming to improve BERT training speed`_.
Args:
sampler (torch.utils.data.Sampler): Base sampler.
Expand Down Expand Up @@ -321,6 +320,9 @@ class DynamicLenBatchSampler(BatchSampler):
>>> tensors = utils.trim_tensors(batch)
>>> b_input_ids, b_input_mask, b_segment_ids, b_labels = \
>>> tuple(t.to(device) for t in tensors)
.. _`Dynamic minibatch trimming to improve BERT training speed`:
https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/94779
"""

def __iter__(self):
Expand Down
6 changes: 4 additions & 2 deletions catalyst/utils/torch.py
Original file line number Diff line number Diff line change
Expand Up @@ -330,14 +330,16 @@ def trim_tensors(tensors):
Trim padding off of a batch of tensors to the smallest possible length.
Should be used with `catalyst.data.DynamicLenBatchSampler`.
Adapted from "Dynamic minibatch trimming to improve BERT training speed"
https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/94779
Adapted from `Dynamic minibatch trimming to improve BERT training speed`_.
Args:
tensors ([torch.tensor]): list of tensors to trim.
Returns:
List[torch.tensor]: list of trimmed tensors.
.. _`Dynamic minibatch trimming to improve BERT training speed`:
https://www.kaggle.com/c/jigsaw-unintended-bias-in-toxicity-classification/discussion/94779
"""
max_len = torch.max(torch.sum((tensors[0] != 0), 1))
if max_len > 2:
Expand Down

0 comments on commit c3fcb48

Please sign in to comment.