"backend/vscode:/vscode.git/clone" did not exist on "81a3c97069f6a9bfbc4ba2c4968cede773ad0403"
Commit 6c875d84 authored by comfyanonymous

Fix clip attention mask issues on some hardware.

parent 805c36ac
@@ -97,7 +97,7 @@ class CLIPTextModel_(torch.nn.Module):
         x = self.embeddings(input_tokens)
         mask = None
         if attention_mask is not None:
-            mask = 1.0 - attention_mask.to(x.dtype).unsqueeze(1).unsqueeze(1).expand(attention_mask.shape[0], 1, attention_mask.shape[-1], attention_mask.shape[-1])
+            mask = 1.0 - attention_mask.to(x.dtype).reshape((attention_mask.shape[0], -1, attention_mask.shape[-1])).expand(attention_mask.shape[0], attention_mask.shape[-1], attention_mask.shape[-1])
             mask = mask.masked_fill(mask.to(torch.bool), float("-inf"))
 
         causal_mask = torch.empty(x.shape[1], x.shape[1], dtype=x.dtype, device=x.device).fill_(float("-inf")).triu_(1)
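For context, a minimal standalone sketch of what this one-line change does, assuming attention_mask has shape (batch, seq_len) with 1.0 for real tokens and 0.0 for padding (the example tensor and variable names below are illustrative, not part of the commit): the old line built a 4-D (batch, 1, seq_len, seq_len) mask via two unsqueeze calls, while the new line builds an equivalent 3-D (batch, seq_len, seq_len) mask via reshape; both variants then turn padded positions into -inf with the same masked_fill step.

import torch

# Illustrative reproduction of the mask construction from the diff above.
# Assumed convention: 1.0 = real token, 0.0 = padding.
attention_mask = torch.tensor([[1.0, 1.0, 1.0, 0.0, 0.0]])
B, T = attention_mask.shape

# Old line: unsqueeze twice -> (B, 1, 1, T), expand -> 4-D (B, 1, T, T)
old_mask = 1.0 - attention_mask.unsqueeze(1).unsqueeze(1).expand(B, 1, T, T)

# New line: reshape -> (B, 1, T), expand -> 3-D (B, T, T)
new_mask = 1.0 - attention_mask.reshape((B, -1, T)).expand(B, T, T)

# Same masked_fill step as in the diff: padded key positions become -inf,
# so they contribute nothing once the mask is added to the attention scores.
old_mask = old_mask.masked_fill(old_mask.to(torch.bool), float("-inf"))
new_mask = new_mask.masked_fill(new_mask.to(torch.bool), float("-inf"))

print(old_mask.shape, new_mask.shape)               # (1, 1, 5, 5) vs (1, 5, 5)
print(torch.equal(old_mask.squeeze(1), new_mask))   # True: same values, different rank

The mask values are identical either way; only the rank of the broadcasted tensor differs, which is presumably what tripped up the attention path on the hardware mentioned in the commit message.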