chenpangpang / transformers
"lmdeploy/vscode:/vscode.git/clone" did not exist on "1a665a63b09a83ab06317f8acfe7e7f75037c5ab"
Commit 41750a6c

Fix typos

Authored Apr 22, 2020 by sshleifer
Committed by Julien Chaumond, Apr 27, 2020
Parent: 12bb7fe7
Showing 1 changed file with 4 additions and 4 deletions.
src/transformers/modeling_utils.py (+4, -4) view file @ 41750a6c

@@ -302,7 +302,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin):
             self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
 
     def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
-        """ Tie or clone module weights depending of weither we are using TorchScript or not
+        """ Tie or clone module weights depending of whether we are using TorchScript or not
         """
         if self.config.torchscript:
             output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
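The corrected docstring matches what the method does: outside TorchScript the output projection shares the input embedding's Parameter, while TorchScript cannot handle shared parameters, so the torchscript branch clones the weight instead. A minimal standalone sketch of that tie-versus-clone distinction, using toy modules rather than the transformers API itself:

import torch
import torch.nn as nn

embed = nn.Embedding(100, 16)              # input embeddings
lm_head = nn.Linear(16, 100, bias=False)   # output projection

# Tied: both modules reference the same Parameter object,
# so a single gradient update moves them together.
lm_head.weight = embed.weight
assert lm_head.weight is embed.weight

# Cloned (the torchscript branch above): an independent copy that is
# equal in value now but free to diverge during training.
lm_head.weight = nn.Parameter(embed.weight.clone())
assert lm_head.weight is not embed.weight
assert torch.equal(lm_head.weight, embed.weight)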
@@ -1524,7 +1524,7 @@ class PreTrainedModel(nn.Module, ModuleUtilsMixin):
         return decoded
 
     # force one of token_ids to be generated by setting prob of all other tokens to 0.
-    def _force_token_ids_generation(self, scores, token_ids):
+    def _force_token_ids_generation(self, scores, token_ids) -> None:
         if isinstance(token_ids, int):
             token_ids = [token_ids]
         all_but_token_ids_mask = torch.tensor(
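The inline comment states the intent: during generation, force the next token to come from token_ids by setting the probability of every other vocabulary entry to zero. A rough standalone sketch of that masking idea on a batch of logits (the force_token_ids helper below is hypothetical, not the actual method body from modeling_utils.py):

import torch

def force_token_ids(scores: torch.Tensor, token_ids) -> None:
    # scores: (batch_size, vocab_size) logits, modified in place.
    if isinstance(token_ids, int):
        token_ids = [token_ids]
    # True for every vocabulary position that should be suppressed.
    all_but_token_ids_mask = torch.ones(scores.shape[1], dtype=torch.bool)
    all_but_token_ids_mask[token_ids] = False
    # A logit of -inf becomes probability 0 after softmax, so only the
    # allowed ids can be sampled or picked by argmax.
    scores[:, all_but_token_ids_mask] = -float("inf")

logits = torch.randn(2, 10)
force_token_ids(logits, 7)
assert logits.argmax(dim=-1).eq(7).all()

The added -> None annotation is consistent with this in-place style: the scores tensor is mutated and nothing is returned.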
@@ -2025,8 +2025,8 @@ def create_position_ids_from_input_ids(input_ids, padding_idx):
     """
     # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
     mask = input_ids.ne(padding_idx).int()
-    incremental_indicies = torch.cumsum(mask, dim=1).type_as(mask) * mask
-    return incremental_indicies.long() + padding_idx
+    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
+    return incremental_indices.long() + padding_idx
 
 
 def prune_linear_layer(layer, index, dim=0):
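The rename from incremental_indicies to incremental_indices sits inside the position-id helper quoted in this hunk: non-padding tokens get positions counted up from padding_idx + 1, and padding tokens keep position padding_idx. A quick worked example of that arithmetic, with toy values chosen for illustration:

import torch

padding_idx = 1
input_ids = torch.tensor([[5, 7, 9, 1, 1]])  # 1 is the padding token id

mask = input_ids.ne(padding_idx).int()                                 # [[1, 1, 1, 0, 0]]
incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask   # [[1, 2, 3, 0, 0]]
position_ids = incremental_indices.long() + padding_idx                # [[2, 3, 4, 1, 1]]

assert position_ids.tolist() == [[2, 3, 4, 1, 1]]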