Unverified Commit bb54b68e authored by wang jiahao's avatar wang jiahao Committed by GitHub
Browse files

Merge pull request #761 from kvcache-ai/fix-server-bug

Fix Ollama API temperature bug
parents 69382e58 7cdf8139
......@@ -227,7 +227,7 @@ class KTransformersInterface(TransformersInterface):
device = self.device_map.get("blk.0.self_attn", {}).get("generate_device", "cuda:0")
return torch.tensor([self.seq_length - 1], device=device)
async def inference(self, local_messages, thread_id: str, temperature: Optional[float], top_p: Optional[float]):
async def inference(self, local_messages, thread_id: str, temperature: Optional[float] = None, top_p: Optional[float] = None):
async with self._infer_lock:
async for v in super().inference(local_messages, thread_id, temperature, top_p):
yield v
Markdown is supported
0% uploaded — or drag and drop files to attach.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment