chenpangpang / transformers
Unverified commit 6fd254a3, authored Oct 04, 2022 by Arnaud Stiegler, committed by GitHub on Oct 04, 2022.
Removing BertConfig inheritance from LayoutLMConfig (#19307)
* removing BertConfig inheritance
* fix missing arguments
Parent: a9782881
Showing 1 changed file with 20 additions and 18 deletions.
src/transformers/models/layoutlm/configuration_layoutlm.py (+20, -18)
@@ -21,7 +21,6 @@ from transformers import PretrainedConfig, PreTrainedTokenizer, TensorType
 from ... import is_torch_available
 from ...onnx import OnnxConfig, PatchingSpec
 from ...utils import logging
-from ..bert.configuration_bert import BertConfig


 logger = logging.get_logger(__name__)
@@ -36,7 +35,7 @@ LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
 }


-class LayoutLMConfig(BertConfig):
+class LayoutLMConfig(PretrainedConfig):
     r"""
     This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a
     LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
@@ -110,25 +109,28 @@ class LayoutLMConfig(BertConfig):
         initializer_range=0.02,
         layer_norm_eps=1e-12,
         pad_token_id=0,
         position_embedding_type="absolute",
         use_cache=True,
         classifier_dropout=None,
         max_2d_position_embeddings=1024,
         **kwargs
     ):
-        super().__init__(
-            vocab_size=vocab_size,
-            hidden_size=hidden_size,
-            num_hidden_layers=num_hidden_layers,
-            num_attention_heads=num_attention_heads,
-            intermediate_size=intermediate_size,
-            hidden_act=hidden_act,
-            hidden_dropout_prob=hidden_dropout_prob,
-            attention_probs_dropout_prob=attention_probs_dropout_prob,
-            max_position_embeddings=max_position_embeddings,
-            type_vocab_size=type_vocab_size,
-            initializer_range=initializer_range,
-            layer_norm_eps=layer_norm_eps,
-            pad_token_id=pad_token_id,
-            **kwargs,
-        )
+        super().__init__(pad_token_id=pad_token_id, **kwargs)
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.type_vocab_size = type_vocab_size
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.position_embedding_type = position_embedding_type
+        self.use_cache = use_cache
+        self.classifier_dropout = classifier_dropout
+        self.max_2d_position_embeddings = max_2d_position_embeddings
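For reference, a minimal sketch of what the change means for downstream code, assuming a transformers build that already contains this commit: LayoutLMConfig now subclasses PretrainedConfig directly and sets the BERT-style fields in its own __init__, so instances are no longer BertConfig objects, while the available configuration attributes stay the same.

# Minimal check of the new behaviour (hypothetical usage, assuming a
# transformers version that includes commit 6fd254a3).
from transformers import BertConfig, LayoutLMConfig, PretrainedConfig

config = LayoutLMConfig()  # default LayoutLM architecture

# LayoutLMConfig is still a PretrainedConfig ...
assert isinstance(config, PretrainedConfig)
# ... but after this commit it is no longer a BertConfig subclass.
assert not isinstance(config, BertConfig)

# The BERT-style fields are still set explicitly in __init__.
print(config.vocab_size, config.hidden_size, config.max_2d_position_embeddings)

Dropping the cross-model inheritance keeps configuration_layoutlm.py self-contained, at the cost of repeating the attribute assignments that BertConfig's __init__ used to perform.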