Unverified Commit e201864b authored by Younes Belkada's avatar Younes Belkada Committed by GitHub
Browse files

[`GPTNeoX`] Fix GPTNeoX + Flash Attention 2 issue (#28645)

Update modeling_gpt_neox.py
parent dafd5951
@@ -390,7 +390,7 @@ class GPTNeoXFlashAttention2(GPTNeoXAttention):
             elif hasattr(self.config, "_pre_quantization_dtype"):
                 target_dtype = self.config._pre_quantization_dtype
             else:
-                target_dtype = self.q_proj.weight.dtype
+                target_dtype = self.query_key_value.weight.dtype
             logger.warning_once(
                 f"The input hidden states seems to be silently casted in float32, this might be related to"
...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment