chenpangpang/transformers · Commit 89b6ee49

Finishing tidying keys to ignore on load (#24535)

Unverified commit, authored Jun 27, 2023 by Sylvain Gugger; committed by GitHub on Jun 27, 2023.
Parent: 04f46a22
Showing 2 changed files, with 11 additions and 7 deletions (+11 −7):

src/transformers/models/instructblip/modeling_instructblip.py  (+3 −7)
tests/models/timm_backbone/test_modeling_timm_backbone.py      (+8 −0)
src/transformers/models/instructblip/modeling_instructblip.py

@@ -275,12 +275,6 @@ class InstructBlipPreTrainedModel(PreTrainedModel):
     config_class = InstructBlipConfig
     base_model_prefix = "blip"
     supports_gradient_checkpointing = True
-    _keys_to_ignore_on_load_missing = [
-        r"position_ids",
-        r"language_model.encoder.embed_tokens.weight",
-        r"language_model.decoder.embed_tokens.weight",
-        r"language_model.lm_head.weight",
-    ]
     _no_split_modules = ["InstructBlipAttention", "InstructBlipQFormerMultiHeadAttention"]
     _keep_in_fp32_modules = []
@@ -1011,7 +1005,9 @@ class InstructBlipQFormerEmbeddings(nn.Module):
         self.dropout = nn.Dropout(config.hidden_dropout_prob)
         # position_ids (1, len position emb) is contiguous in memory and exported when serialized
-        self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)))
+        self.register_buffer(
+            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
+        )
         self.position_embedding_type = getattr(config, "position_embedding_type", "absolute")
         self.config = config
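The two hunks in this file work together: once the position_ids buffer is registered with persistent=False, it is no longer written to (or expected in) the checkpoint, so the regex that previously silenced the "missing key" warning for it can be dropped from _keys_to_ignore_on_load_missing. A minimal sketch in plain PyTorch (hypothetical Embeddings class, not the InstructBlip code) of what persistent=False changes:

# Minimal sketch in plain PyTorch (hypothetical Embeddings class, not the
# InstructBlip code) of what persistent=False changes.
import torch
from torch import nn


class Embeddings(nn.Module):
    def __init__(self, max_position_embeddings: int = 512):
        super().__init__()
        # persistent=False keeps the buffer out of state_dict(), so it is
        # neither saved to checkpoints nor reported as a missing key on load;
        # it is simply rebuilt here every time the module is constructed.
        self.register_buffer(
            "position_ids",
            torch.arange(max_position_embeddings).expand((1, -1)),
            persistent=False,
        )


module = Embeddings()
print("position_ids" in module.state_dict())  # False: not serialized
module.load_state_dict({}, strict=True)       # no "missing position_ids" error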
tests/models/timm_backbone/test_modeling_timm_backbone.py

@@ -176,6 +176,14 @@ class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTeste
     def test_tied_model_weights_key_ignore(self):
         pass

+    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    def test_load_save_without_tied_weights(self):
+        pass
+
+    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
+    def test_model_weights_reload_no_missing_tied_weights(self):
+        pass
+
     @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
     def test_channels(self):
         pass
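These stubs follow a common pattern in the transformers test suite: a mixin provides shared tests, and a model-specific subclass overrides the ones that don't apply with @unittest.skip, whose reason string shows up in the test report instead of a failure. A minimal, self-contained sketch of that pattern (hypothetical class and test names, not the actual mixins):

import unittest


class CommonTests:
    # Shared test assumed to be provided by a mixin and run for every model.
    def test_load_save_without_tied_weights(self):
        raise AssertionError("would exercise save/load round-tripping")


class BackboneOnlyTest(CommonTests, unittest.TestCase):
    # Override the inherited test; the body is never executed and the
    # runner reports it as skipped with the given reason.
    @unittest.skip("Only checkpoints on timm can be loaded into this backbone")
    def test_load_save_without_tied_weights(self):
        pass


if __name__ == "__main__":
    unittest.main()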