Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove deprecated code #638

Merged
merged 6 commits into from
Nov 25, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

### Removed

- Removed `embedding_similarity` metric ([#638](https://github.com/PyTorchLightning/metrics/pull/638))
- Removed argument `concatenate_texts` from `wer` metric ([#638](https://github.com/PyTorchLightning/metrics/pull/638))
- Removed arguments `newline_sep` and `decimal_places` from `rouge` metric ([#638](https://github.com/PyTorchLightning/metrics/pull/638))

### Fixed

Expand Down
46 changes: 0 additions & 46 deletions tests/functional/test_self_supervised.py

This file was deleted.

2 changes: 0 additions & 2 deletions torchmetrics/functional/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,6 @@
from torchmetrics.functional.retrieval.r_precision import retrieval_r_precision
from torchmetrics.functional.retrieval.recall import retrieval_recall
from torchmetrics.functional.retrieval.reciprocal_rank import retrieval_reciprocal_rank
from torchmetrics.functional.self_supervised import embedding_similarity
from torchmetrics.functional.text.bert import bert_score
from torchmetrics.functional.text.bleu import bleu_score
from torchmetrics.functional.text.cer import char_error_rate
Expand All @@ -89,7 +88,6 @@
"cosine_similarity",
"tweedie_deviance_score",
"dice_score",
"embedding_similarity",
"explained_variance",
"f1",
"fbeta",
Expand Down
57 changes: 0 additions & 57 deletions torchmetrics/functional/self_supervised.py

This file was deleted.

8 changes: 1 addition & 7 deletions torchmetrics/functional/text/wer.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,8 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List, Optional, Tuple, Union
from warnings import warn
from typing import List, Tuple, Union

import torch
from torch import Tensor, tensor
Expand Down Expand Up @@ -65,7 +64,6 @@ def _wer_compute(errors: Tensor, total: Tensor) -> Tensor:
def wer(
predictions: Union[str, List[str]],
references: Union[str, List[str]],
concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7
) -> Tensor:
"""Word error rate (WER_) is a common metric of the performance of an automatic speech recognition system. This
value indicates the percentage of words that were incorrectly predicted. The lower the value, the better the
Expand All @@ -74,8 +72,6 @@ def wer(
Args:
predictions: Transcription(s) to score as a string or list of strings
references: Reference(s) for each speech input as a string or list of strings
concatenate_texts: Whether to concatenate all input texts or compute WER iteratively
This argument is deprecated in v0.6 and it will be removed in v0.7.

Returns:
Word error rate score
Expand All @@ -86,7 +82,5 @@ def wer(
>>> wer(predictions=predictions, references=references)
tensor(0.5000)
"""
if concatenate_texts is not None:
warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning)
errors, total = _wer_update(predictions, references)
return _wer_compute(errors, total)
14 changes: 0 additions & 14 deletions torchmetrics/text/rouge.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

from torch import Tensor
Expand All @@ -26,17 +25,11 @@ class ROUGEScore(Metric):
of the `rouge-score` package `Python ROUGE Implementation`

Args:
newline_sep:
New line separate the inputs.
This argument is no longer in use. It is deprecated in v0.6 and will be removed in v0.7.
use_stemmer:
Use Porter stemmer to strip word suffixes to improve matching.
rouge_keys:
A list of rouge types to calculate.
Keys that are allowed are ``rougeL``, ``rougeLsum``, and ``rouge1`` through ``rouge9``.
decimal_places:
The number of digits to round the computed the values to.
This argument is no longer in use. It is deprecated in v0.6 and will be removed in v0.7.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Expand Down Expand Up @@ -82,10 +75,8 @@ class ROUGEScore(Metric):

def __init__(
self,
newline_sep: Optional[bool] = None, # remove in v0.7
use_stemmer: bool = False,
rouge_keys: Union[str, Tuple[str, ...]] = ("rouge1", "rouge2", "rougeL", "rougeLsum"), # type: ignore
decimal_places: Optional[bool] = None, # remove in v0.7
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
Expand All @@ -97,11 +88,6 @@ def __init__(
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if newline_sep is not None:
warnings.warn("Argument `newline_sep` is deprecated in v0.6 and will be removed in v0.7")
if decimal_places is not None:
warnings.warn("Argument `decimal_places` is deprecated in v0.6 and will be removed in v0.7")

if use_stemmer or "rougeLsum" in rouge_keys:
if not _NLTK_AVAILABLE:
raise ValueError("Stemmer and/or `rougeLsum` requires that nltk is installed. Use `pip install nltk`.")
Expand Down
7 changes: 0 additions & 7 deletions torchmetrics/text/wer.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Callable, List, Optional, Union
from warnings import warn

import torch
from torch import Tensor, tensor
Expand Down Expand Up @@ -42,8 +40,6 @@ class WER(Metric):
Compute WER score of transcribed segments against references.

Args:
concatenate_texts: Whether to concatenate all input texts or compute WER iteratively.
This argument is deprecated in v0.6 and it will be removed in v0.7.
compute_on_step:
Forward only calls ``update()`` and return None if this is set to False. default: True
dist_sync_on_step:
Expand Down Expand Up @@ -72,7 +68,6 @@ class WER(Metric):

def __init__(
self,
concatenate_texts: Optional[bool] = None, # TODO: remove in v0.7
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
Expand All @@ -84,8 +79,6 @@ def __init__(
process_group=process_group,
dist_sync_fn=dist_sync_fn,
)
if concatenate_texts is not None:
warn("`concatenate_texts` has been deprecated in v0.6 and it will be removed in v0.7", DeprecationWarning)
self.add_state("errors", tensor(0, dtype=torch.float), dist_reduce_fx="sum")
self.add_state("total", tensor(0, dtype=torch.float), dist_reduce_fx="sum")

Expand Down