Skip to content

Commit

Permalink
[Fix doc example] fix missing import jnp (#15291)
Browse files Browse the repository at this point in the history
* fix missing import jnp

* Fix missing jax and k=1

Co-authored-by: ydshieh <ydshieh@users.noreply.github.com>
Loading branch information
ydshieh and ydshieh committed Jan 24, 2022
1 parent eac4aec commit c15bb3f
Show file tree
Hide file tree
Showing 6 changed files with 16 additions and 2 deletions.
5 changes: 4 additions & 1 deletion src/transformers/models/bart/modeling_flax_bart.py
Original file line number Diff line number Diff line change
Expand Up @@ -1085,6 +1085,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
>>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
Expand Down Expand Up @@ -1353,6 +1354,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
>>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")
Expand Down Expand Up @@ -1525,6 +1527,7 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
Mask filling example:
```python
>>> import jax
>>> from transformers import BartTokenizer, FlaxBartForConditionalGeneration
>>> model = FlaxBartForConditionalGeneration.from_pretrained("facebook/bart-large")
Expand All @@ -1536,7 +1539,7 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero()[0].item()
>>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
>>> values, predictions = jax.lax.top_k(probs)
>>> values, predictions = jax.lax.top_k(probs, k=1)
>>> tokenizer.decode(predictions).split()
```
Expand Down
2 changes: 2 additions & 0 deletions src/transformers/models/blenderbot/modeling_flax_blenderbot.py
Original file line number Diff line number Diff line change
Expand Up @@ -1048,6 +1048,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration
>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
Expand Down Expand Up @@ -1317,6 +1318,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import BlenderbotTokenizer, FlaxBlenderbotForConditionalGeneration
>>> model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-400M-distill")
Expand Down
2 changes: 2 additions & 0 deletions src/transformers/models/blenderbot_small/modeling_flax_blenderbot_small.py
Original file line number Diff line number Diff line change
Expand Up @@ -1060,6 +1060,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration
>>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
Expand Down Expand Up @@ -1329,6 +1330,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import BlenderbotSmallTokenizer, FlaxBlenderbotSmallForConditionalGeneration
>>> model = FlaxBlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot_small-90M")
Expand Down
2 changes: 2 additions & 0 deletions src/transformers/models/marian/modeling_flax_marian.py
Original file line number Diff line number Diff line change
Expand Up @@ -1051,6 +1051,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> tokenizer = MarianTokenizer.from_pretrained("facebook/marian-large-cnn")
Expand Down Expand Up @@ -1319,6 +1320,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import MarianTokenizer, FlaxMarianMTModel
>>> model = FlaxMarianMTModel.from_pretrained("Helsinki-NLP/opus-mt-en-de")
Expand Down
2 changes: 2 additions & 0 deletions src/transformers/models/pegasus/modeling_flax_pegasus.py
Original file line number Diff line number Diff line change
Expand Up @@ -1058,6 +1058,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration
>>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
Expand Down Expand Up @@ -1327,6 +1328,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import PegasusTokenizer, FlaxPegasusForConditionalGeneration
>>> model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-large")
Expand Down
5 changes: 4 additions & 1 deletion templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/modeling_flax_{{cookiecutter.lowercase_modelname}}.py
Original file line number Diff line number Diff line change
Expand Up @@ -2188,6 +2188,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
Expand Down Expand Up @@ -2455,6 +2456,7 @@ def decode(
Example:
```python
>>> import jax.numpy as jnp
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
Expand Down Expand Up @@ -2627,6 +2629,7 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
Mask filling example:
```python
>>> import jax
>>> from transformers import {{cookiecutter.camelcase_modelname}}Tokenizer, Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration
>>> model = Flax{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}')
Expand All @@ -2638,7 +2641,7 @@ def update_inputs_for_generation(self, model_outputs, model_kwargs):
>>> logits = model(input_ids).logits
>>> masked_index = (input_ids[0] == tokenizer.mask_token_id).nonzero().item()
>>> probs = jax.nn.softmax(logits[0, masked_index], axis=0)
>>> values, predictions = jax.lax.top_k(probs)
>>> values, predictions = jax.lax.top_k(probs, k=1)
>>> tokenizer.decode(predictions).split()
```
Expand Down

0 comments on commit c15bb3f

Please sign in to comment.