"tests/git@developer.sourcefind.cn:OpenDAS/apex.git" did not exist on "6af5980e7acaa715c06da8477f535686bed1b464"
Unverified Commit 04028317 authored by Stas Bekman, committed by GitHub

consistent nn. and nn.functional: part 5 docs (#12161)

parent 88e84186
@@ -518,7 +518,7 @@ PyTorch, called ``SimpleModel`` as follows:

 .. code:: python

-    import torch.nn as nn
+    from torch import nn
     class SimpleModel(nn.Module):
         def __init__(self):
...
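For reference, a minimal runnable sketch of the `SimpleModel` pattern this hunk touches, using the new `from torch import nn` import style; the layer and its sizes are illustrative placeholders, not taken from the docs:

```python
from torch import nn

class SimpleModel(nn.Module):
    def __init__(self):
        super().__init__()
        # placeholder layer; the sizes are illustrative, not from the original docs
        self.linear = nn.Linear(10, 2)

    def forward(self, x):
        return self.linear(x)
```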
@@ -59,7 +59,7 @@ classification:

 .. code-block:: python

-    import torch
+    from torch import nn
     from transformers import Trainer
     class MultilabelTrainer(Trainer):
@@ -67,7 +67,7 @@ classification:
             labels = inputs.pop("labels")
             outputs = model(**inputs)
             logits = outputs.logits
-            loss_fct = torch.nn.BCEWithLogitsLoss()
+            loss_fct = nn.BCEWithLogitsLoss()
             loss = loss_fct(logits.view(-1, self.model.config.num_labels),
                             labels.float().view(-1, self.model.config.num_labels))
             return (loss, outputs) if return_outputs else loss
...
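The substantive change here is `torch.nn.BCEWithLogitsLoss()` becoming `nn.BCEWithLogitsLoss()`. A self-contained sketch of the same loss computation outside the Trainer, with random tensors standing in for model logits and multi-hot labels:

```python
import torch
from torch import nn

num_labels = 5                                 # placeholder label count
logits = torch.randn(8, num_labels)            # stand-in for outputs.logits
labels = torch.randint(0, 2, (8, num_labels))  # stand-in multi-hot targets

loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits.view(-1, num_labels),
                labels.float().view(-1, num_labels))
```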
@@ -23,7 +23,7 @@ expected changes:

#### 1. AutoTokenizers and pipelines now use fast (rust) tokenizers by default.

The python and rust tokenizers have roughly the same API, but the rust tokenizers have a more complete feature set.
This introduces two breaking changes:

- The handling of overflowing tokens between the python and rust tokenizers is different.
@@ -85,7 +85,7 @@ This is a breaking change as importing intermediary layers using a model's modul
##### How to obtain the same behavior as v3.x in v4.x

In order to obtain the same behavior as version `v3.x`, you should update the path used to access the layers.
In version `v3.x`:
```bash
...
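This part of the migration guide covers the switch to fast (rust) tokenizers by default. A short sketch of how the v3.x behavior can be recovered, assuming the `use_fast` flag of `AutoTokenizer.from_pretrained` (the model name here is just an example):

```python
from transformers import AutoTokenizer

# v4.x default: fast (rust) tokenizer
fast_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")

# opt back into the python (slow) tokenizer, as in v3.x
slow_tokenizer = AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False)
```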
@@ -265,8 +265,8 @@ Let's apply the SoftMax activation to get predictions.

 .. code-block::

     >>> ## PYTORCH CODE
-    >>> import torch.nn.functional as F
-    >>> pt_predictions = F.softmax(pt_outputs.logits, dim=-1)
+    >>> from torch import nn
+    >>> pt_predictions = nn.functional.softmax(pt_outputs.logits, dim=-1)
     >>> ## TENSORFLOW CODE
     >>> import tensorflow as tf
     >>> tf.nn.softmax(tf_outputs.logits, axis=-1)
...
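Both the old `F.softmax` and the new `nn.functional.softmax` spelling call the same function; a quick sketch verifying that the result is a probability distribution, with random logits standing in for `pt_outputs.logits`:

```python
import torch
from torch import nn

logits = torch.randn(2, 3)  # stand-in for pt_outputs.logits
pt_predictions = nn.functional.softmax(logits, dim=-1)
# each row should sum to 1
assert torch.allclose(pt_predictions.sum(dim=-1), torch.ones(2))
```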
@@ -451,7 +451,7 @@ of tokens.

     >>> ## PYTORCH CODE
     >>> from transformers import AutoModelWithLMHead, AutoTokenizer, top_k_top_p_filtering
     >>> import torch
-    >>> from torch.nn import functional as F
+    >>> from torch import nn
     >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
     >>> model = AutoModelWithLMHead.from_pretrained("gpt2")
@@ -467,7 +467,7 @@ of tokens.
     >>> filtered_next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=50, top_p=1.0)
     >>> # sample
-    >>> probs = F.softmax(filtered_next_token_logits, dim=-1)
+    >>> probs = nn.functional.softmax(filtered_next_token_logits, dim=-1)
     >>> next_token = torch.multinomial(probs, num_samples=1)
     >>> generated = torch.cat([input_ids, next_token], dim=-1)
...
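The sampling step being edited here is a softmax followed by a multinomial draw. A self-contained sketch with a random logits tensor standing in for `filtered_next_token_logits` (the vocabulary size is a placeholder):

```python
import torch
from torch import nn

vocab_size = 50257  # GPT-2's vocabulary size, used here as a placeholder
filtered_next_token_logits = torch.randn(1, vocab_size)

probs = nn.functional.softmax(filtered_next_token_logits, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)  # shape (1, 1)
```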