Unverified commit f5c113e4, authored by Daniel Socek, committed by GitHub

Use SDP on BF16 in GPU/HPU migration (#12310)



* Use SDP on BF16 in GPU/HPU migration

Signed-off-by: Daniel Socek <daniel.socek@intel.com>

* Formatting fix for enabling SDP with BF16 precision on HPU

Signed-off-by: Daniel Socek <daniel.socek@intel.com>

---------

Signed-off-by: Daniel Socek <daniel.socek@intel.com>
parent 5e181edd
@@ -505,6 +505,13 @@ class DiffusionPipeline(ConfigMixin, PushToHubMixin):
             os.environ["PT_HPU_MAX_COMPOUND_OP_SIZE"] = "1"
             logger.debug("Environment variable set: PT_HPU_MAX_COMPOUND_OP_SIZE=1")
+
+            if dtype in (torch.bfloat16, None) and kwargs.pop("sdp_on_bf16", True):
+                if hasattr(torch._C, "_set_math_sdp_allow_fp16_bf16_reduction"):
+                    torch._C._set_math_sdp_allow_fp16_bf16_reduction(True)
+                    logger.warning(
+                        "Enabled SDP with BF16 precision on HPU. To disable, please use `.to('hpu', sdp_on_bf16=False)`"
+                    )

         module_names, _ = self._get_signature_keys(self)
         modules = [getattr(self, n, None) for n in module_names]
         modules = [m for m in modules if isinstance(m, torch.nn.Module)]
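
For context, a minimal usage sketch of the new `sdp_on_bf16` kwarg (not part of the commit: the model ID below is a placeholder, and an HPU-enabled PyTorch/Habana build is assumed). With the default `sdp_on_bf16=True`, moving a BF16 (or dtype-unspecified) pipeline to HPU enables reduced-precision BF16 reduction in the math SDP backend; the private `torch._C._set_math_sdp_allow_fp16_bf16_reduction` binding used in the diff corresponds to the public `torch.backends.cuda.allow_fp16_bf16_reduction_math_sdp` toggle in recent PyTorch releases. Passing `sdp_on_bf16=False` opts out, exactly as the warning message suggests.

    import torch
    from diffusers import DiffusionPipeline

    # Placeholder model ID; any diffusers pipeline checkpoint works the same way.
    pipe = DiffusionPipeline.from_pretrained(
        "some-org/some-model", torch_dtype=torch.bfloat16
    )

    # Default path: enables SDP with BF16 reduction on HPU and logs the warning above.
    pipe.to("hpu")

    # Opt-out path, as given in the warning message:
    # pipe.to("hpu", sdp_on_bf16=False)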