Unverified commit d9ac6392 authored by Yueyang Pan, committed by GitHub

Fix flashinfer version (#576)

parent 26294b2f
@@ -30,12 +30,8 @@ class RadixAttention(nn.Module):
             self.prefill_forward = self.prefill_forward_flashinfer
             self.extend_forward = self.prefill_forward_flashinfer
             self.decode_forward = self.decode_forward_flashinfer
-            # flashinfer only accepts a boolean logit_cap argument
-            if logit_cap > 0:
-                assert logit_cap == 30
-                self.logit_cap = True
-            else:
-                self.logit_cap = False
+            # flashinfer now accepts a float logit_cap argument
+            self.logit_cap = logit_cap if logit_cap > 0 else 0
         else:
             self.prefill_forward = self.prefill_forward_triton
             self.extend_forward = self.extend_forward_triton
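The hunk above replaces the old boolean flag with the cap value itself (0 meaning disabled). For reference, a logits soft cap is commonly applied as cap * tanh(logits / cap); the sketch below only illustrates that transform and is not flashinfer's kernel code.

# Hedged sketch: the soft-cap transform a float logit_cap implies,
# i.e. cap * tanh(x / cap). Illustration only, not flashinfer internals.
import torch

def soft_cap_logits(logits: torch.Tensor, logit_cap: float) -> torch.Tensor:
    """Smoothly bound attention logits to (-logit_cap, logit_cap)."""
    if logit_cap <= 0:  # 0 means "disabled", matching self.logit_cap = 0 above
        return logits
    return logit_cap * torch.tanh(logits / logit_cap)

# Example: capping at 30, the value the removed assert hard-coded.
scores = torch.randn(4, 8) * 100
capped = soft_cap_logits(scores, 30.0)
assert capped.abs().max() < 30.0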
@@ -110,7 +106,7 @@ class RadixAttention(nn.Module):
         o = input_metadata.flashinfer_prefill_wrapper.forward(
             q.contiguous().view(-1, self.tp_q_head_num, self.head_dim),
             input_metadata.token_to_kv_pool.kv_data[self.layer_id],
-            logits_cap=self.logit_cap,
+            logits_soft_cap=self.logit_cap,
         )
         return o.view(-1, self.tp_q_head_num * self.head_dim)
@@ -121,7 +117,7 @@ class RadixAttention(nn.Module):
         o = input_metadata.flashinfer_decode_wrapper.forward(
             q.contiguous().view(-1, self.tp_q_head_num, self.head_dim),
             input_metadata.token_to_kv_pool.kv_data[self.layer_id],
-            logits_cap=self.logit_cap,
+            logits_soft_cap=self.logit_cap,
         )
         return o.view(-1, self.tp_q_head_num * self.head_dim)
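Both wrapper calls rename the keyword from logits_cap (boolean, old API) to logits_soft_cap (float, new API). If a caller had to support both flashinfer releases, a version-aware shim could pick the right keyword; the helper below is hypothetical and not part of this repository.

# Hedged sketch: hypothetical keyword shim for old vs. new flashinfer builds.
import importlib.metadata
from packaging.version import Version

def logit_cap_kwargs(logit_cap: float) -> dict:
    """Return the logit-cap keyword expected by the installed flashinfer."""
    ver = Version(importlib.metadata.version("flashinfer"))
    if ver >= Version("0.0.7"):
        return {"logits_soft_cap": logit_cap}  # float soft cap (new keyword)
    return {"logits_cap": logit_cap > 0}       # boolean flag (old keyword)

# Usage (assumed wrapper object): wrapper.forward(q, kv_data, **logit_cap_kwargs(30.0))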
@@ -152,7 +152,7 @@ def launch_server(server_args: ServerArgs, pipe_finish_writer, model_overide_arg
     if server_args.disable_disk_cache:
         disable_cache()
     if server_args.enable_flashinfer:
-        assert_pkg_version("flashinfer", "0.0.5")
+        assert_pkg_version("flashinfer", "0.0.7")
     if server_args.chat_template:
         # TODO: replace this with huggingface transformers template
         load_chat_template_for_openai_api(server_args.chat_template)
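The last hunk raises the required flashinfer version to 0.0.7, matching the new float logits_soft_cap argument. A minimal version check in the spirit of assert_pkg_version might look like the sketch below; sglang's actual helper may differ.

# Hedged sketch of a minimal package-version check, not sglang's implementation.
import importlib.metadata
from packaging.version import Version

def assert_pkg_version_sketch(pkg: str, min_version: str) -> None:
    installed = importlib.metadata.version(pkg)  # raises if pkg is not installed
    if Version(installed) < Version(min_version):
        raise RuntimeError(
            f"{pkg}=={installed} is installed, but >= {min_version} is required. "
            f"Please upgrade, e.g. `pip install -U {pkg}`."
        )

assert_pkg_version_sketch("flashinfer", "0.0.7")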