Unverified Commit 6fd254a3 authored by Arnaud Stiegler, committed by GitHub

Removing BertConfig inheritance from LayoutLMConfig (#19307)

* removing BertConfig inheritance

* fix missing arguments
parent a9782881
@@ -21,7 +21,6 @@ from transformers import PretrainedConfig, PreTrainedTokenizer, TensorType
 from ... import is_torch_available
 from ...onnx import OnnxConfig, PatchingSpec
 from ...utils import logging
-from ..bert.configuration_bert import BertConfig


 logger = logging.get_logger(__name__)
@@ -36,7 +35,7 @@ LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
 }


-class LayoutLMConfig(BertConfig):
+class LayoutLMConfig(PretrainedConfig):
     r"""
     This is the configuration class to store the configuration of a [`LayoutLMModel`]. It is used to instantiate a
     LayoutLM model according to the specified arguments, defining the model architecture. Instantiating a configuration
@@ -110,25 +109,28 @@ class LayoutLMConfig(BertConfig):
         initializer_range=0.02,
         layer_norm_eps=1e-12,
         pad_token_id=0,
+        position_embedding_type="absolute",
+        use_cache=True,
+        classifier_dropout=None,
         max_2d_position_embeddings=1024,
         **kwargs
     ):
-        super().__init__(
-            vocab_size=vocab_size,
-            hidden_size=hidden_size,
-            num_hidden_layers=num_hidden_layers,
-            num_attention_heads=num_attention_heads,
-            intermediate_size=intermediate_size,
-            hidden_act=hidden_act,
-            hidden_dropout_prob=hidden_dropout_prob,
-            attention_probs_dropout_prob=attention_probs_dropout_prob,
-            max_position_embeddings=max_position_embeddings,
-            type_vocab_size=type_vocab_size,
-            initializer_range=initializer_range,
-            layer_norm_eps=layer_norm_eps,
-            pad_token_id=pad_token_id,
-            **kwargs,
-        )
+        super().__init__(pad_token_id=pad_token_id, **kwargs)
+        self.vocab_size = vocab_size
+        self.hidden_size = hidden_size
+        self.num_hidden_layers = num_hidden_layers
+        self.num_attention_heads = num_attention_heads
+        self.hidden_act = hidden_act
+        self.intermediate_size = intermediate_size
+        self.hidden_dropout_prob = hidden_dropout_prob
+        self.attention_probs_dropout_prob = attention_probs_dropout_prob
+        self.max_position_embeddings = max_position_embeddings
+        self.type_vocab_size = type_vocab_size
+        self.initializer_range = initializer_range
+        self.layer_norm_eps = layer_norm_eps
+        self.position_embedding_type = position_embedding_type
+        self.use_cache = use_cache
+        self.classifier_dropout = classifier_dropout
         self.max_2d_position_embeddings = max_2d_position_embeddings
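
For downstream code, the practical effect is that LayoutLMConfig is no longer an instance of BertConfig, while serialization keeps working through PretrainedConfig. A minimal sketch, assuming the standard transformers config API (save_pretrained / from_pretrained); the local save path is illustrative only:

    from transformers import BertConfig, LayoutLMConfig

    config = LayoutLMConfig(hidden_size=768, max_2d_position_embeddings=1024)

    # After this commit the config subclasses PretrainedConfig directly,
    # so isinstance checks against BertConfig no longer match.
    assert not isinstance(config, BertConfig)

    # The save/load round trip is unchanged: it goes through PretrainedConfig.
    config.save_pretrained("./layoutlm-config")  # writes config.json
    reloaded = LayoutLMConfig.from_pretrained("./layoutlm-config")
    assert reloaded.max_2d_position_embeddings == 1024

This is also why the commit's second change ("fix missing arguments") adds position_embedding_type, use_cache, and classifier_dropout to the signature: they were previously supplied by BertConfig and now have to be declared explicitly.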