Unverified Commit 45addfe9 authored by Stas Bekman's avatar Stas Bekman Committed by GitHub
Browse files

FlaubertForTokenClassification (#5644)

* implement FlaubertForTokenClassification as a subclass of XLMForTokenClassification

* fix mapping order

* add the doc

* add common tests
parent 7096e475
...@@ -61,6 +61,13 @@ FlaubertForSequenceClassification ...@@ -61,6 +61,13 @@ FlaubertForSequenceClassification
:members: :members:
FlaubertForTokenClassification
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.FlaubertForTokenClassification
:members:
FlaubertForQuestionAnsweringSimple FlaubertForQuestionAnsweringSimple
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
...@@ -114,4 +121,4 @@ TFFlaubertForQuestionAnsweringSimple ...@@ -114,4 +121,4 @@ TFFlaubertForQuestionAnsweringSimple
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autoclass:: transformers.TFFlaubertForQuestionAnsweringSimple .. autoclass:: transformers.TFFlaubertForQuestionAnsweringSimple
:members: :members:
\ No newline at end of file
...@@ -353,6 +353,7 @@ if is_torch_available(): ...@@ -353,6 +353,7 @@ if is_torch_available():
FlaubertModel, FlaubertModel,
FlaubertWithLMHeadModel, FlaubertWithLMHeadModel,
FlaubertForSequenceClassification, FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForQuestionAnswering, FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple, FlaubertForQuestionAnsweringSimple,
FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
......
...@@ -100,6 +100,7 @@ from .modeling_encoder_decoder import EncoderDecoderModel ...@@ -100,6 +100,7 @@ from .modeling_encoder_decoder import EncoderDecoderModel
from .modeling_flaubert import ( from .modeling_flaubert import (
FlaubertForQuestionAnsweringSimple, FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification, FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel, FlaubertModel,
FlaubertWithLMHeadModel, FlaubertWithLMHeadModel,
) )
...@@ -326,6 +327,7 @@ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict( ...@@ -326,6 +327,7 @@ MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = OrderedDict(
[ [
(DistilBertConfig, DistilBertForTokenClassification), (DistilBertConfig, DistilBertForTokenClassification),
(CamembertConfig, CamembertForTokenClassification), (CamembertConfig, CamembertForTokenClassification),
(FlaubertConfig, FlaubertForTokenClassification),
(XLMConfig, XLMForTokenClassification), (XLMConfig, XLMForTokenClassification),
(XLMRobertaConfig, XLMRobertaForTokenClassification), (XLMRobertaConfig, XLMRobertaForTokenClassification),
(LongformerConfig, LongformerForTokenClassification), (LongformerConfig, LongformerForTokenClassification),
...@@ -1552,6 +1554,7 @@ class AutoModelForTokenClassification: ...@@ -1552,6 +1554,7 @@ class AutoModelForTokenClassification:
- isInstance of `bert` configuration class: :class:`~transformers.BertModelForTokenClassification` (Bert model) - isInstance of `bert` configuration class: :class:`~transformers.BertModelForTokenClassification` (Bert model)
- isInstance of `albert` configuration class: :class:`~transformers.AlbertForTokenClassification` (AlBert model) - isInstance of `albert` configuration class: :class:`~transformers.AlbertForTokenClassification` (AlBert model)
- isInstance of `xlnet` configuration class: :class:`~transformers.XLNetModelForTokenClassification` (XLNet model) - isInstance of `xlnet` configuration class: :class:`~transformers.XLNetModelForTokenClassification` (XLNet model)
- isInstance of `flaubert` configuration class: :class:`~transformers.FlaubertForTokenClassification` (Flaubert model)
- isInstance of `camembert` configuration class: :class:`~transformers.CamembertModelForTokenClassification` (Camembert model) - isInstance of `camembert` configuration class: :class:`~transformers.CamembertModelForTokenClassification` (Camembert model)
- isInstance of `roberta` configuration class: :class:`~transformers.RobertaModelForTokenClassification` (Roberta model) - isInstance of `roberta` configuration class: :class:`~transformers.RobertaModelForTokenClassification` (Roberta model)
- isInstance of `electra` configuration class: :class:`~transformers.ElectraForTokenClassification` (Electra model) - isInstance of `electra` configuration class: :class:`~transformers.ElectraForTokenClassification` (Electra model)
...@@ -1589,6 +1592,7 @@ class AutoModelForTokenClassification: ...@@ -1589,6 +1592,7 @@ class AutoModelForTokenClassification:
- `camembert`: :class:`~transformers.CamembertForTokenClassification` (Camembert model) - `camembert`: :class:`~transformers.CamembertForTokenClassification` (Camembert model)
- `bert`: :class:`~transformers.BertForTokenClassification` (Bert model) - `bert`: :class:`~transformers.BertForTokenClassification` (Bert model)
- `xlnet`: :class:`~transformers.XLNetForTokenClassification` (XLNet model) - `xlnet`: :class:`~transformers.XLNetForTokenClassification` (XLNet model)
- `flaubert`: :class:`~transformers.FlaubertForTokenClassification` (Flaubert model)
- `roberta`: :class:`~transformers.RobertaForTokenClassification` (Roberta model) - `roberta`: :class:`~transformers.RobertaForTokenClassification` (Roberta model)
- `electra`: :class:`~transformers.ElectraForTokenClassification` (Electra model) - `electra`: :class:`~transformers.ElectraForTokenClassification` (Electra model)
......
...@@ -28,6 +28,7 @@ from .modeling_xlm import ( ...@@ -28,6 +28,7 @@ from .modeling_xlm import (
XLMForQuestionAnswering, XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple, XLMForQuestionAnsweringSimple,
XLMForSequenceClassification, XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel, XLMModel,
XLMWithLMHeadModel, XLMWithLMHeadModel,
get_masks, get_masks,
...@@ -326,6 +327,25 @@ class FlaubertForSequenceClassification(XLMForSequenceClassification): ...@@ -326,6 +327,25 @@ class FlaubertForSequenceClassification(XLMForSequenceClassification):
self.init_weights() self.init_weights()
@add_start_docstrings(
    """Flaubert Model with a token classification head on top (a linear layer on top of
    the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """,
    FLAUBERT_START_DOCSTRING,
)
class FlaubertForTokenClassification(XLMForTokenClassification):
    """
    Flaubert token-classification model.

    A thin subclass of :class:`~transformers.XLMForTokenClassification`: all of the
    forward logic lives in the superclass; see it for documentation and usage
    examples. This class only swaps in the Flaubert configuration and backbone.
    """

    # Tie this model to FlaubertConfig so Auto* classes dispatch correctly.
    config_class = FlaubertConfig

    def __init__(self, config):
        super().__init__(config)
        # Replace the XLM backbone built by the superclass with a Flaubert one;
        # the token-classification head from the superclass is reused as-is.
        self.transformer = FlaubertModel(config)
        self.init_weights()
@add_start_docstrings( @add_start_docstrings(
"""Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of """Flaubert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of
the hidden-states output to compute `span start logits` and `span end logits`). """, the hidden-states output to compute `span start logits` and `span end logits`). """,
......
...@@ -31,6 +31,7 @@ if is_torch_available(): ...@@ -31,6 +31,7 @@ if is_torch_available():
FlaubertForQuestionAnswering, FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple, FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification, FlaubertForSequenceClassification,
FlaubertForTokenClassification,
) )
from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
...@@ -294,6 +295,30 @@ class FlaubertModelTester(object): ...@@ -294,6 +295,30 @@ class FlaubertModelTester(object):
self.parent.assertListEqual(list(result["loss"].size()), []) self.parent.assertListEqual(list(result["loss"].size()), [])
self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.type_sequence_label_size]) self.parent.assertListEqual(list(result["logits"].size()), [self.batch_size, self.type_sequence_label_size])
def create_and_check_flaubert_token_classif(
    self,
    config,
    input_ids,
    token_type_ids,
    input_lengths,
    sequence_labels,
    token_labels,
    is_impossible_labels,
    input_mask,
):
    """Build a FlaubertForTokenClassification model and check its loss/logits shapes."""
    config.num_labels = self.num_labels

    # Instantiate in eval mode on the test device so dropout is disabled.
    model = FlaubertForTokenClassification(config)
    model.to(torch_device)
    model.eval()

    loss, logits = model(input_ids, attention_mask=input_mask, labels=token_labels)
    result = dict(loss=loss, logits=logits)

    # Logits must be one score per label for every token in the batch.
    expected_logits_shape = [self.batch_size, self.seq_length, self.num_labels]
    self.parent.assertListEqual(list(result["logits"].size()), expected_logits_shape)
    self.check_loss_output(result)
def prepare_config_and_inputs_for_common(self): def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs() config_and_inputs = self.prepare_config_and_inputs()
( (
...@@ -320,6 +345,7 @@ class FlaubertModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -320,6 +345,7 @@ class FlaubertModelTest(ModelTesterMixin, unittest.TestCase):
FlaubertForQuestionAnswering, FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple, FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification, FlaubertForSequenceClassification,
FlaubertForTokenClassification,
) )
if is_torch_available() if is_torch_available()
else () else ()
...@@ -352,6 +378,10 @@ class FlaubertModelTest(ModelTesterMixin, unittest.TestCase): ...@@ -352,6 +378,10 @@ class FlaubertModelTest(ModelTesterMixin, unittest.TestCase):
config_and_inputs = self.model_tester.prepare_config_and_inputs() config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)
def test_flaubert_token_classif(self):
    """Run the token-classification shape/loss checks on a freshly prepared config."""
    inputs = self.model_tester.prepare_config_and_inputs()
    self.model_tester.create_and_check_flaubert_token_classif(*inputs)
@slow @slow
def test_model_from_pretrained(self): def test_model_from_pretrained(self):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment