"...git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "24c7d578baf6a8b79890101dd280278fff031d12"
Unverified Commit 2ca59806 authored by drbh, committed by GitHub

Pr 2337 ci branch (#2379)



* hotfix: fix xpu crash caused by code refactoring; torch.xpu relies on importing ipex
Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>

* re-enable gemma2 on xpu
Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>

* fix regression in ipex flash attention
Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>

---------

Signed-off-by: Wang, Yi A <yi.a.wang@intel.com>
Co-authored-by: Wang, Yi A <yi.a.wang@intel.com>
parent 689b1abb
@@ -2,6 +2,7 @@ import intel_extension_for_pytorch as ipex
 import torch
 from text_generation_server.models.flash_causal_lm import BLOCK_SIZE
 from text_generation_server.layers.attention import Seqlen
+from typing import Optional

 SUPPORTS_WINDOWING = False
@@ -15,11 +16,12 @@ def attention(
     softmax_scale,
     window_size_left=-1,
     causal=True,
+    softcap: Optional[float] = None,
 ):
     out = torch.empty_like(q)
     # We do not need to check window_size_left (not supported) here, so it is already checked ahead of time at model load.
-    return ipex.llm.functional.varlen_attention(
+    ipex.llm.functional.varlen_attention(
         q,
         k,
         v,
@@ -36,6 +38,8 @@ def attention(
         None,
     )
+
+    return out

 def reshape_and_cache(
     key: torch.Tensor,
@@ -58,6 +62,7 @@ def paged_attention(
     block_tables: torch.Tensor,
     seqlen: Seqlen,
     max_s: int,
+    softcap: Optional[float] = None,
 ):
     out = torch.empty_like(query)
     ipex.llm.modules.PagedAttention.single_query_cached_kv_attention(
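The hunks above add a softcap argument to the attention entry points (the visible hunks do not show it being consumed, so it appears to be for signature parity with the other backends) and fix the flash-attention regression: ipex.llm.functional.varlen_attention evidently fills the preallocated out buffer (the rest of its argument list is elided in the hunk), so the wrapper must return out rather than the kernel call's own return value. Below is a minimal sketch of that pattern; varlen_attention_stub is a hypothetical stand-in, not the IPEX kernel or the TGI code itself.

from typing import Optional

import torch


def varlen_attention_stub(q, k, v, out):
    # Hypothetical stand-in for ipex.llm.functional.varlen_attention:
    # it fills the preallocated `out` buffer in place; its own return
    # value is not the attention output.
    out.copy_(torch.softmax(q @ k.transpose(-1, -2), dim=-1) @ v)
    return None


def attention(q, k, v, softmax_scale=1.0, window_size_left=-1, causal=True,
              softcap: Optional[float] = None):
    out = torch.empty_like(q)
    # Regressed version effectively did `return varlen_attention_stub(...)`,
    # which returned None instead of the attention output.
    varlen_attention_stub(q, k, v, out)
    # Fix: hand back the buffer the kernel filled.
    return out


q = k = v = torch.randn(4, 8)
print(attention(q, k, v).shape)  # torch.Size([4, 8])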
@@ -56,6 +56,8 @@ elif torch.version.cuda is not None and torch.cuda.is_available():
     get_free_memory = get_cuda_free_memory
 elif is_ipex_available():
     SYSTEM = "ipex"
+    import intel_extension_for_pytorch  # noqa: F401
+
     if hasattr(torch, "xpu") and torch.xpu.is_available():
         empty_cache = torch.xpu.empty_cache
         synchronize = torch.xpu.synchronize
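This hunk corresponds to the first commit-message bullet: torch.xpu is only reliably usable once intel_extension_for_pytorch has been imported for its side effects, so the import has to happen before the torch.xpu.is_available() probe. A minimal sketch of that ordering follows; the is_ipex_available helper here is a simplified stand-in, not the actual implementation in the detection module.

import importlib.util

import torch


def is_ipex_available() -> bool:
    # Simplified stand-in: only checks that the package can be found.
    return importlib.util.find_spec("intel_extension_for_pytorch") is not None


if is_ipex_available():
    SYSTEM = "ipex"
    # Importing ipex for its side effects makes the torch.xpu namespace
    # usable; probing torch.xpu before this import is what crashed on XPU.
    import intel_extension_for_pytorch  # noqa: F401

    if hasattr(torch, "xpu") and torch.xpu.is_available():
        empty_cache = torch.xpu.empty_cache
        synchronize = torch.xpu.synchronize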