Fix syntax for class references (#14644)
sgugger authored Dec 6, 2021
1 parent e968887 commit e513c16
Showing 11 changed files with 25 additions and 25 deletions.
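Every change in this commit is the same one-token docstring fix: the generic Sphinx :obj: role is replaced with the :class: role wherever the referenced target is a class, so the Python domain resolves it as a class and renders a proper cross-reference link. A minimal sketch of the distinction, in a hypothetical docstring that is not part of this diff:

    def example() -> None:
        """
        :class:`~transformers.LogitsProcessor` cross-references the class; the
        leading ``~`` makes Sphinx display only the final component, i.e.
        ``LogitsProcessor``. By contrast, :obj:`None` uses the generic object
        role, which remains the right choice for values and built-in types.
        """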
8 changes: 4 additions & 4 deletions src/transformers/generation_flax_utils.py
@@ -326,8 +326,8 @@ def _get_logits_warper(
         self, top_k: int = None, top_p: float = None, temperature: float = None
     ) -> FlaxLogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.FlaxLogitsWarper` instances used for multinomial sampling.
+        This class returns a :class:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.FlaxLogitsWarper` instances used for multinomial sampling.
         """
 
         # init warp parameters
@@ -358,8 +358,8 @@ def _get_logits_processor(
         forced_eos_token_id: int,
     ) -> FlaxLogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.FlaxLogitsProcessor` instances used to modify the scores of the language model head.
+        This class returns a :class:`~transformers.FlaxLogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.FlaxLogitsProcessor` instances used to modify the scores of the language model head.
         """
         processors = FlaxLogitsProcessorList()
 
8 changes: 4 additions & 4 deletions src/transformers/generation_utils.py
@@ -535,8 +535,8 @@ def _get_logits_warper(
         self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None
     ) -> LogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.LogitsWarper` instances used for multinomial sampling.
+        This class returns a :class:`~transformers.LogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.LogitsWarper` instances used for multinomial sampling.
         """
 
         # init warp parameters
@@ -575,8 +575,8 @@ def _get_logits_processor(
         remove_invalid_values: bool,
     ) -> LogitsProcessorList:
         """
-        This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant
-        :obj:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head.
+        This class returns a :class:`~transformers.LogitsProcessorList` list object that contains all relevant
+        :class:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head.
         """
         processors = LogitsProcessorList()
 
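For context, a minimal sketch (not part of the commit) of how these internally-built lists surface in the public API; the GPT-2 checkpoint and generation settings are illustrative assumptions:

    from transformers import (
        AutoModelForCausalLM,
        AutoTokenizer,
        LogitsProcessorList,
        MinLengthLogitsProcessor,
    )

    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")

    # _get_logits_processor assembles a list like this internally from the
    # generate() arguments; a user-supplied list is merged with the defaults.
    processors = LogitsProcessorList(
        [MinLengthLogitsProcessor(10, eos_token_id=model.config.eos_token_id)]
    )

    # top_k and temperature are converted into LogitsWarper instances by
    # _get_logits_warper because do_sample=True.
    output_ids = model.generate(
        **inputs,
        do_sample=True,
        top_k=50,
        temperature=0.9,
        logits_processor=processors,
        max_length=20,
    )
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))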
2 changes: 1 addition & 1 deletion src/transformers/modeling_tf_utils.py
@@ -737,7 +737,7 @@ def serving_output(output):
         Prepare the output of the saved model. Each model must implement this function.
 
         Args:
-            output (:obj:`~transformers.TFBaseModelOutput`):
+            output (:class:`~transformers.TFBaseModelOutput`):
                 The output returned by the model.
         """
         raise NotImplementedError
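For orientation, a hedged sketch of what a concrete override typically looks like, modeled on existing TF models in the library (exact bodies vary per model and per output class):

    import tensorflow as tf

    from transformers.modeling_tf_outputs import TFBaseModelOutput


    def serving_output(self, output):
        # Stack per-layer tuples into tensors so the SavedModel signature is
        # fixed; drop them entirely when the corresponding config flag is off.
        hidden_states = (
            tf.convert_to_tensor(output.hidden_states)
            if self.config.output_hidden_states
            else None
        )
        attentions = (
            tf.convert_to_tensor(output.attentions)
            if self.config.output_attentions
            else None
        )
        return TFBaseModelOutput(
            last_hidden_state=output.last_hidden_state,
            hidden_states=hidden_states,
            attentions=attentions,
        )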
2 changes: 1 addition & 1 deletion src/transformers/models/tapas/tokenization_tapas.py
@@ -1277,7 +1277,7 @@ def _get_truncated_table_rows(
                 Total number of table columns
             max_length (:obj:`int`):
                 Total maximum length.
-            truncation_strategy (:obj:`str` or :obj:`~transformers.TapasTruncationStrategy`):
+            truncation_strategy (:obj:`str` or :class:`~transformers.TapasTruncationStrategy`):
                 Truncation strategy to use. Seeing as this method should only be called when truncating, the only
                 available strategy is the :obj:`"drop_rows_to_fit"` strategy.
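As a usage sketch of where this strategy surfaces publicly (the checkpoint and the toy table are illustrative assumptions):

    import pandas as pd

    from transformers import TapasTokenizer

    tokenizer = TapasTokenizer.from_pretrained("google/tapas-base")

    # TAPAS expects every table cell as text.
    table = pd.DataFrame({"City": ["Paris", "Lyon"], "Population": ["2.1M", "0.5M"]})

    # truncation="drop_rows_to_fit" maps to TapasTruncationStrategy.DROP_ROWS_TO_FIT,
    # the only strategy _get_truncated_table_rows handles.
    inputs = tokenizer(
        table=table,
        queries=["Which city is larger?"],
        truncation="drop_rows_to_fit",
        max_length=64,
        padding="max_length",
        return_tensors="pt",
    )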
8 changes: 4 additions & 4 deletions src/transformers/pipelines/__init__.py
@@ -372,29 +372,29 @@ def pipeline(
             - :obj:`"summarization"`: will return a :class:`~transformers.SummarizationPipeline`:.
             - :obj:`"zero-shot-classification"`: will return a :class:`~transformers.ZeroShotClassificationPipeline`:.
 
-        model (:obj:`str` or :obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`, `optional`):
+        model (:obj:`str` or :class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`, `optional`):
             The model that will be used by the pipeline to make predictions. This can be a model identifier or an
             actual instance of a pretrained model inheriting from :class:`~transformers.PreTrainedModel` (for PyTorch)
             or :class:`~transformers.TFPreTrainedModel` (for TensorFlow).
 
             If not provided, the default for the :obj:`task` will be loaded.
-        config (:obj:`str` or :obj:`~transformers.PretrainedConfig`, `optional`):
+        config (:obj:`str` or :class:`~transformers.PretrainedConfig`, `optional`):
             The configuration that will be used by the pipeline to instantiate the model. This can be a model
             identifier or an actual pretrained model configuration inheriting from
             :class:`~transformers.PretrainedConfig`.
 
             If not provided, the default configuration file for the requested model will be used. That means that if
             :obj:`model` is given, its default configuration will be used. However, if :obj:`model` is not supplied,
             this :obj:`task`'s default model's config is used instead.
-        tokenizer (:obj:`str` or :obj:`~transformers.PreTrainedTokenizer`, `optional`):
+        tokenizer (:obj:`str` or :class:`~transformers.PreTrainedTokenizer`, `optional`):
             The tokenizer that will be used by the pipeline to encode data for the model. This can be a model
             identifier or an actual pretrained tokenizer inheriting from :class:`~transformers.PreTrainedTokenizer`.
 
             If not provided, the default tokenizer for the given :obj:`model` will be loaded (if it is a string). If
             :obj:`model` is not specified or not a string, then the default tokenizer for :obj:`config` is loaded (if
             it is a string). However, if :obj:`config` is also not given or not a string, then the default tokenizer
             for the given :obj:`task` will be loaded.
-        feature_extractor (:obj:`str` or :obj:`~transformers.PreTrainedFeatureExtractor`, `optional`):
+        feature_extractor (:obj:`str` or :class:`~transformers.PreTrainedFeatureExtractor`, `optional`):
             The feature extractor that will be used by the pipeline to encode data for the model. This can be a model
             identifier or an actual pretrained feature extractor inheriting from
             :class:`~transformers.PreTrainedFeatureExtractor`.
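A short usage sketch of the arguments documented above (the checkpoint name is an assumption; it is simply the kind of identifier these parameters accept):

    from transformers import AutoTokenizer, pipeline

    # Simplest form: the task's default model, config, and tokenizer are loaded.
    classifier = pipeline("sentiment-analysis")
    print(classifier("This commit cleans up the docs nicely."))

    # Explicit form: model and tokenizer may be Hub identifiers or instances.
    checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    classifier = pipeline("sentiment-analysis", model=checkpoint, tokenizer=tokenizer)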
2 changes: 1 addition & 1 deletion src/transformers/pipelines/audio_classification.py
@@ -93,7 +93,7 @@ def __call__(
         **kwargs,
     ):
         """
-        Classify the sequence(s) given as inputs. See the :obj:`~transformers.AutomaticSpeechRecognitionPipeline`
+        Classify the sequence(s) given as inputs. See the :class:`~transformers.AutomaticSpeechRecognitionPipeline`
         documentation for more information.
 
         Args:
8 changes: 4 additions & 4 deletions src/transformers/pipelines/automatic_speech_recognition.py
@@ -77,13 +77,13 @@ class AutomaticSpeechRecognitionPipeline(Pipeline):
     def __init__(self, feature_extractor: Union["SequenceFeatureExtractor", str], *args, **kwargs):
         """
         Arguments:
-            feature_extractor (:obj:`~transformers.SequenceFeatureExtractor`):
+            feature_extractor (:class:`~transformers.SequenceFeatureExtractor`):
                 The feature extractor that will be used by the pipeline to encode waveform for the model.
-            model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
+            model (:class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
                 The model that will be used by the pipeline to make predictions. This needs to be a model inheriting
                 from :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel`
                 for TensorFlow.
-            tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
+            tokenizer (:class:`~transformers.PreTrainedTokenizer`):
                 The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
                 :class:`~transformers.PreTrainedTokenizer`.
             modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
@@ -114,7 +114,7 @@ def __call__(
         **kwargs,
     ):
         """
-        Classify the sequence(s) given as inputs. See the :obj:`~transformers.AutomaticSpeechRecognitionPipeline`
+        Classify the sequence(s) given as inputs. See the :class:`~transformers.AutomaticSpeechRecognitionPipeline`
         documentation for more information.
 
         Args:
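A minimal construction sketch (the checkpoint is an assumption; any speech model with a matching feature extractor and tokenizer works):

    from transformers import pipeline

    # pipeline() resolves the feature extractor, model, and tokenizer from the
    # checkpoint and instantiates AutomaticSpeechRecognitionPipeline.
    asr = pipeline("automatic-speech-recognition", model="facebook/wav2vec2-base-960h")

    # Inputs may be a filename, raw bytes, or a numpy waveform; the hypothetical
    # sample.flac stands in for any local audio file.
    print(asr("sample.flac"))  # -> {"text": "..."}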
4 changes: 2 additions & 2 deletions src/transformers/pipelines/base.py
@@ -644,11 +644,11 @@ def predict(self, X):
 
 PIPELINE_INIT_ARGS = r"""
     Arguments:
-        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
+        model (:class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
-        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
+        tokenizer (:class:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
4 changes: 2 additions & 2 deletions src/transformers/pipelines/feature_extraction.py
@@ -16,11 +16,11 @@ class FeatureExtractionPipeline(Pipeline):
     `huggingface.co/models <https://huggingface.co/models>`__.
 
     Arguments:
-        model (:obj:`~transformers.PreTrainedModel` or :obj:`~transformers.TFPreTrainedModel`):
+        model (:class:`~transformers.PreTrainedModel` or :class:`~transformers.TFPreTrainedModel`):
            The model that will be used by the pipeline to make predictions. This needs to be a model inheriting from
            :class:`~transformers.PreTrainedModel` for PyTorch and :class:`~transformers.TFPreTrainedModel` for
            TensorFlow.
-        tokenizer (:obj:`~transformers.PreTrainedTokenizer`):
+        tokenizer (:class:`~transformers.PreTrainedTokenizer`):
            The tokenizer that will be used by the pipeline to encode data for the model. This object inherits from
            :class:`~transformers.PreTrainedTokenizer`.
        modelcard (:obj:`str` or :class:`~transformers.ModelCard`, `optional`):
2 changes: 1 addition & 1 deletion src/transformers/pipelines/zero_shot_classification.py
@@ -154,7 +154,7 @@ def __call__(
         **kwargs,
     ):
         """
-        Classify the sequence(s) given as inputs. See the :obj:`~transformers.ZeroShotClassificationPipeline`
+        Classify the sequence(s) given as inputs. See the :class:`~transformers.ZeroShotClassificationPipeline`
         documentation for more information.
 
         Args:
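A short usage sketch of this pipeline (text and labels are made up for illustration):

    from transformers import pipeline

    # With no model given, the task's default NLI checkpoint is loaded.
    classifier = pipeline("zero-shot-classification")

    result = classifier(
        "The docstrings now use the :class: role for class references.",
        candidate_labels=["documentation", "performance", "security"],
    )
    print(result["labels"][0])  # highest-scoring label comes first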
2 changes: 1 addition & 1 deletion src/transformers/trainer.py
@@ -239,7 +239,7 @@ class Trainer:
         compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):
             The function that will be used to compute metrics at evaluation. Must take a
             :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.
-        callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`):
+        callbacks (List of :class:`~transformers.TrainerCallback`, `optional`):
            A list of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed in :doc:`here <callback>`.
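A hedged sketch of both documented arguments in use; the callback, the metric function, and the model/train_dataset objects are illustrative stand-ins prepared elsewhere:

    import numpy as np

    from transformers import Trainer, TrainerCallback, TrainingArguments


    class LogOnEpochEnd(TrainerCallback):
        # Callback hooks receive the TrainingArguments, TrainerState, and
        # TrainerControl; this one only logs at each epoch boundary.
        def on_epoch_end(self, args, state, control, **kwargs):
            print(f"finished epoch {state.epoch}")


    def compute_metrics(eval_pred):
        # eval_pred is an EvalPrediction with .predictions and .label_ids.
        preds = np.argmax(eval_pred.predictions, axis=-1)
        return {"accuracy": float((preds == eval_pred.label_ids).mean())}


    trainer = Trainer(
        model=model,  # assumption: a PreTrainedModel created earlier
        args=TrainingArguments(output_dir="out"),
        train_dataset=train_dataset,  # assumption: a dataset created earlier
        compute_metrics=compute_metrics,
        callbacks=[LogOnEpochEnd()],
    )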
