Commit 2c3dcd97 authored by hrz6976

Add a lock to server inference()

parent 8bad019e
 import torch
+import asyncio
 from transformers import AutoTokenizer, AutoConfig, GenerationConfig
 from ktransformers.server.backend.interfaces.transformers import (
     TransformersInterface,
@@ -70,6 +71,8 @@ class KTransformersInterface(TransformersInterface):
         self.model.generation_config.pad_token_id = self.model.generation_config.eos_token_id
         self.streamer = TextStreamer(self.tokenizer)
+        self._infer_lock = asyncio.Lock()
+
     def decode_one_tokens(self):
         device_map = self.model.gguf_loader.tensor_device_map
         torch_device = get_device("blk.0.self_attn", device_map)
@@ -171,4 +174,9 @@ class KTransformersInterface(TransformersInterface):
     @property
     def active_cache_position(self):
         device = self.device_map.get("blk.0.self_attn", {}).get("generate_device", "cuda:0")
-        return torch.tensor([self.seq_length - 1], device=device)
\ No newline at end of file
+        return torch.tensor([self.seq_length - 1], device=device)
+
+    async def inference(self, local_messages, thread_id: str):
+        async with self._infer_lock:
+            async for v in super().inference(local_messages, thread_id):
+                yield v
\ No newline at end of file
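
The effect of this change is to serialize concurrent calls to inference(): because the lock is acquired inside the overriding async generator and held until the generator is exhausted, only one request can stream tokens at a time. Below is a minimal self-contained sketch of the same pattern; the Worker class, _generate(), and handle() names are hypothetical stand-ins for illustration, not part of the commit.

import asyncio


class Worker:
    """Toy stand-in for KTransformersInterface: one lock guards generation."""

    def __init__(self):
        self._infer_lock = asyncio.Lock()

    async def _generate(self, prompt: str):
        # Placeholder for the real token-by-token generation loop.
        for tok in prompt.split():
            await asyncio.sleep(0.01)
            yield tok

    async def inference(self, prompt: str, thread_id: str):
        # The lock is held for the whole lifetime of this async generator,
        # so overlapping requests run strictly one after another.
        async with self._infer_lock:
            async for tok in self._generate(prompt):
                yield tok


async def handle(worker: Worker, prompt: str, thread_id: str):
    return [tok async for tok in worker.inference(prompt, thread_id)]


async def main():
    worker = Worker()
    # Both requests are issued concurrently, but the second only starts
    # generating after the first finishes streaming.
    results = await asyncio.gather(
        handle(worker, "hello from request one", "t1"),
        handle(worker, "hello from request two", "t2"),
    )
    print(results)


asyncio.run(main())

One consequence of wrapping the generator this way: a client that stops consuming the stream early keeps the lock until its generator is closed and the async with block exits, so later requests wait behind abandoned ones until finalization.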