Commit 42049b8e (unverified), parent fdaf8ab3
Authored Sep 16, 2020 by Stas Bekman; committed via GitHub on Sep 16, 2020

use the correct add_start_docstrings (#7174)
Showing 3 changed files with 6 additions and 6 deletions:

+2 -2  src/transformers/tokenization_marian.py
+2 -2  src/transformers/tokenization_mbart.py
+2 -2  src/transformers/tokenization_pegasus.py
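The commit swaps the decorator on each tokenizer's prepare_seq2seq_batch from add_start_docstrings_to_callable, which appears intended for model forward methods and adds an extra class-specific preamble, to the plain add_start_docstrings, which simply prepends the shared docstring to the decorated function. A minimal sketch of the behavior being relied on (the real implementation lives in src/transformers/file_utils.py and may differ in detail):

# Simplified sketch, not the exact transformers implementation.
def add_start_docstrings(*docstr):
    def docstring_decorator(fn):
        # Prepend the shared docstring fragments to whatever docstring fn already has.
        fn.__doc__ = "".join(docstr) + (fn.__doc__ or "")
        return fn
    return docstring_decorator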
src/transformers/tokenization_marian.py

@@ -7,7 +7,7 @@ from typing import Dict, List, Optional, Tuple, Union
 import sentencepiece

-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils import BatchEncoding, PreTrainedTokenizer
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING

@@ -125,7 +125,7 @@ class MarianTokenizer(PreTrainedTokenizer):
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return token_ids_0 + token_ids_1 + [self.eos_token_id]

-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
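For context, a minimal usage sketch of the decorated method on the Marian tokenizer, assuming a transformers release from around this commit (~3.x) and the public Helsinki-NLP/opus-mt-en-de checkpoint; the exact output keys may vary by version:

from transformers import MarianTokenizer

# Example checkpoint chosen for illustration; any Marian MT model name would do.
tok = MarianTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-de")

# prepare_seq2seq_batch encodes source (and optionally target) texts into a BatchEncoding.
batch = tok.prepare_seq2seq_batch(
    src_texts=["Machine learning is great."],
    tgt_texts=["Maschinelles Lernen ist großartig."],
    return_tensors="pt",
)
print(batch.keys())  # e.g. input_ids, attention_mask, plus the encoded targets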
src/transformers/tokenization_mbart.py

@@ -15,7 +15,7 @@
 from typing import List, Optional

-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils import BatchEncoding
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING
 from .tokenization_xlm_roberta import XLMRobertaTokenizer

@@ -156,7 +156,7 @@ class MBartTokenizer(XLMRobertaTokenizer):
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
src/transformers/tokenization_pegasus.py

@@ -16,7 +16,7 @@ from typing import Dict, List, Optional
 from transformers.tokenization_reformer import ReformerTokenizer

-from .file_utils import add_start_docstrings_to_callable
+from .file_utils import add_start_docstrings
 from .tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING, BatchEncoding

@@ -104,7 +104,7 @@ class PegasusTokenizer(ReformerTokenizer):
         # We don't expect to process pairs, but leave the pair logic for API consistency
         return token_ids_0 + token_ids_1 + [self.eos_token_id]

-    @add_start_docstrings_to_callable(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
+    @add_start_docstrings(PREPARE_SEQ2SEQ_BATCH_DOCSTRING)
     def prepare_seq2seq_batch(
         self,
         src_texts: List[str],
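The practical effect is documentation-only: with the corrected decorator, the shared seq2seq-batch docstring should now appear at the start of each method's __doc__. A quick check one could run, assuming the same ~3.x era module layout as in the diff:

from transformers import PegasusTokenizer
from transformers.tokenization_utils_base import PREPARE_SEQ2SEQ_BATCH_DOCSTRING

# add_start_docstrings prepends the shared docstring, so __doc__ should start with it.
doc = PegasusTokenizer.prepare_seq2seq_batch.__doc__
print(doc.startswith(PREPARE_SEQ2SEQ_BATCH_DOCSTRING))  # expected: True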