Unverified Commit 3ec828d6 authored by Patrick von Platen's avatar Patrick von Platen Committed by GitHub
Browse files

Fix moved _expand_mask function (#5581)

* finish

* finish
parent 9135e54e
...@@ -19,10 +19,21 @@ from torch import nn ...@@ -19,10 +19,21 @@ from torch import nn
from transformers import CLIPPreTrainedModel from transformers import CLIPPreTrainedModel
from transformers.modeling_outputs import BaseModelOutputWithPooling from transformers.modeling_outputs import BaseModelOutputWithPooling
from transformers.models.clip.configuration_clip import CLIPTextConfig from transformers.models.clip.configuration_clip import CLIPTextConfig
from transformers.models.clip.modeling_clip import ( from transformers.models.clip.modeling_clip import CLIPEncoder
CLIPEncoder,
_expand_mask,
) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
"""
Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
"""
bsz, src_len = mask.size()
tgt_len = tgt_len if tgt_len is not None else src_len
expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype)
inverted_mask = 1.0 - expanded_mask
return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min)
# This is a modified version of the CLIPTextModel from transformers.models.clip.modeling_clip # This is a modified version of the CLIPTextModel from transformers.models.clip.modeling_clip
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment