"git@developer.sourcefind.cn:OpenDAS/fastmoe.git" did not exist on "d99ae3f21983b8450981470b6a534e7b705b9b30"
Unverified Commit 587d84b1 authored by Younes Belkada, committed by GitHub

Add `BloomForQuestionAnswering` (#19310)



* add bloom for question answering

- Attempted to add Bloom for question answering
- Adapted from `GPTJForQuestionAnswering`
- Fixed `num_labels` to `2` for common tests
- Added a bit of docstring
- All common tests pass

* Update src/transformers/models/bloom/modeling_bloom.py
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>

* revert changes related to `num_labels`
Co-authored-by: Sylvain Gugger <35901082+sgugger@users.noreply.github.com>
parent 6dce9e0c
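For context, here is the new head in use. A minimal sketch, assuming the `bigscience/bloom-560m` checkpoint (any Bloom checkpoint works; note the `qa_outputs` layer is randomly initialized until fine-tuned on a QA dataset):

# Minimal sketch: extractive QA with the new BloomForQuestionAnswering head.
import torch
from transformers import AutoTokenizer, BloomForQuestionAnswering

tokenizer = AutoTokenizer.from_pretrained("bigscience/bloom-560m")
model = BloomForQuestionAnswering.from_pretrained("bigscience/bloom-560m")

question = "What does BLOOM stand for?"
context = "BLOOM is a large open-science, open-access multilingual language model."
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Pick the most likely start/end token indices and decode the span between them.
start = outputs.start_logits.argmax(dim=-1).item()
end = outputs.end_logits.argmax(dim=-1).item()
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])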
...@@ -55,3 +55,8 @@ Several smaller versions of the models have been trained on the same dataset. BL
[[autodoc]] BloomForTokenClassification
    - forward

## BloomForQuestionAnswering

[[autodoc]] BloomForQuestionAnswering
    - forward
...@@ -993,6 +993,7 @@ else:
            "BloomPreTrainedModel",
            "BloomForSequenceClassification",
            "BloomForTokenClassification",
            "BloomForQuestionAnswering",
        ]
    )
    _import_structure["models.blenderbot"].extend(
...@@ -3857,6 +3858,7 @@ if TYPE_CHECKING:
        from .models.bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
...
...@@ -572,6 +572,7 @@ MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES = OrderedDict(
        ("bert", "BertForQuestionAnswering"),
        ("big_bird", "BigBirdForQuestionAnswering"),
        ("bigbird_pegasus", "BigBirdPegasusForQuestionAnswering"),
        ("bloom", "BloomForQuestionAnswering"),
        ("camembert", "CamembertForQuestionAnswering"),
        ("canine", "CanineForQuestionAnswering"),
        ("convbert", "ConvBertForQuestionAnswering"),
...
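Because of the mapping entry above, the Auto classes can now dispatch a Bloom config to the QA head. A small sketch of what that enables (checkpoint name is an assumption, as before):

# The new ("bloom", "BloomForQuestionAnswering") entry lets the Auto class
# resolve a Bloom checkpoint to the QA head without naming it explicitly.
from transformers import AutoModelForQuestionAnswering

model = AutoModelForQuestionAnswering.from_pretrained("bigscience/bloom-560m")
print(type(model).__name__)  # BloomForQuestionAnswering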
...@@ -45,6 +45,7 @@ else:
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]

if TYPE_CHECKING:
...@@ -67,6 +68,7 @@ if TYPE_CHECKING:
        from .modeling_bloom import (
            BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
            BloomForCausalLM,
            BloomForQuestionAnswering,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomModel,
...
...@@ -28,6 +28,7 @@ from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
...@@ -1167,3 +1168,95 @@ class BloomForTokenClassification(BloomPreTrainedModel):
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )
@add_start_docstrings(
    """
    The BLOOM Model transformer with a span classification head on top for extractive question-answering tasks like
    SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    BLOOM_START_DOCSTRING,
)
class BloomForQuestionAnswering(BloomPreTrainedModel):
    _keys_to_ignore_on_load_missing = [r"h.*.self_attention.scale_mask_softmax.causal_mask", r"lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)
        self.transformer = BloomModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, 2)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(BLOOM_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for the position (index) of the start of the labelled span for computing the token classification
            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for the position (index) of the end of the labelled span for computing the token classification
            loss. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.transformer(
            input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds a dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # Sometimes the start/end positions are outside our model inputs; we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
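The loss branch above averages the start- and end-position cross-entropies, after clamping out-of-range labels to an ignored index. A tiny training-style sketch of that path (the config sizes are made up for illustration):

# Exercise the loss branch with dummy span labels on a tiny, untrained model.
import torch
from transformers import BloomConfig, BloomForQuestionAnswering

config = BloomConfig(hidden_size=64, n_layer=2, n_head=4)  # tiny, illustration only
model = BloomForQuestionAnswering(config)

input_ids = torch.randint(0, config.vocab_size, (2, 16))  # batch of 2, 16 tokens
start_positions = torch.tensor([3, 5])  # (batch_size,) start token indices
end_positions = torch.tensor([6, 9])  # (batch_size,) end token indices

outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions)
print(outputs.loss)  # (start CE + end CE) / 2, a scalar
print(outputs.start_logits.shape)  # torch.Size([2, 16])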
...@@ -1006,6 +1006,13 @@ class BloomForCausalLM(metaclass=DummyObject):
        requires_backends(self, ["torch"])


class BloomForQuestionAnswering(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])


class BloomForSequenceClassification(metaclass=DummyObject):
    _backends = ["torch"]
...
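The dummy class mirrors the other Bloom heads in `dummy_pt_objects`: it keeps the name importable from `transformers` when PyTorch is absent, and raises a clear error only on use. A quick sketch of that behavior:

# In an environment without torch, the dummy stands in for the real class;
# instantiating it raises an ImportError naming the missing backend.
from transformers import BloomForQuestionAnswering

model = BloomForQuestionAnswering()  # ImportError if torch is not installed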
...@@ -31,6 +31,7 @@ if is_torch_available():
    from transformers import (
        BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
        BloomForCausalLM,
        BloomForQuestionAnswering,
        BloomForSequenceClassification,
        BloomForTokenClassification,
        BloomModel,
...@@ -274,6 +275,14 @@ class BloomModelTester:
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_question_answering_model(self, config, input_ids, input_mask, *args):
        model = BloomForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask)
        # QuestionAnsweringModelOutput has no `logits` attribute; check the
        # start/end logits, each of shape (batch_size, seq_length).
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
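A natural companion check (a sketch, not part of this commit) would also exercise the loss branch from the tester:

    def create_and_check_question_answering_loss(self, config, input_ids, input_mask, *args):
        # Hypothetical extra test: feed dummy span labels and expect a scalar loss.
        model = BloomForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()

        start_positions = torch.zeros(self.batch_size, dtype=torch.long, device=torch_device)
        end_positions = torch.ones(self.batch_size, dtype=torch.long, device=torch_device)
        result = model(
            input_ids,
            attention_mask=input_mask,
            start_positions=start_positions,
            end_positions=end_positions,
        )

        self.parent.assertEqual(result.loss.shape, ())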
    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, *args, gradient_checkpointing=False
    ):
...@@ -314,6 +323,7 @@ class BloomModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase)
            BloomForCausalLM,
            BloomForSequenceClassification,
            BloomForTokenClassification,
            BloomForQuestionAnswering,
        )
        if is_torch_available()
        else ()
...