Unverified commit ddeb9d42 authored by James Xu, committed by GitHub

Add engine encode (#1995)


Co-authored-by: Byron Hsu <byronhsu1230@gmail.com>
parent aaf0a315
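
For context, here is a minimal usage sketch of the `Engine.encode` API added by this commit. The model name and arguments mirror the test added below; they are illustrative assumptions, not requirements of the API.

```python
# Sketch of calling the new Engine.encode (based on the test in this commit).
# The embedding model name is the one used by the test and is only illustrative.
import sglang as sgl

engine = sgl.Engine(
    model_path="Alibaba-NLP/gte-Qwen2-1.5B-instruct",  # small embedding model from the test
    is_embedding=True,
    log_level="error",
)

# encode() accepts a prompt (or list of prompts) and, as exercised by the test below,
# returns a dict containing an "embedding" field.
embedding = engine.encode("Today is a sunny day and I like")["embedding"]
print(len(embedding))

engine.shutdown()
```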
@@ -874,4 +874,12 @@ class Engine:
         else:
             return tokenizer_manager.tokenizer
 
-    # TODO (ByronHsu): encode
+    def encode(
+        self,
+        prompt: Union[str, List[str], List[Dict], List[List[Dict]]],
+    ):
+        obj = EmbeddingReqInput(text=prompt)
+
+        # get the current event loop
+        loop = asyncio.get_event_loop()
+        return loop.run_until_complete(encode_request(obj, None))
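
The new method follows a sync-over-async pattern: the synchronous `encode()` drives the async `encode_request` coroutine to completion on the event loop. Below is a self-contained sketch of that pattern; `fake_encode_request` is a hypothetical stand-in, not sglang's actual coroutine.

```python
# Sketch of the sync-over-async pattern used by encode() above.
# `fake_encode_request` is a made-up stand-in for sglang's encode_request coroutine.
import asyncio


async def fake_encode_request(obj, request):
    await asyncio.sleep(0)  # pretend to do async work (tokenization, forward pass, ...)
    return {"embedding": [0.0] * 8}


def encode_sync(obj):
    # Run the coroutine on the event loop and block until it returns a result.
    loop = asyncio.get_event_loop()
    return loop.run_until_complete(fake_encode_request(obj, None))


print(encode_sync({"text": "hello"})["embedding"][:3])
```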
@@ -28,6 +28,7 @@ from sglang.utils import get_exception_traceback
 DEFAULT_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/Meta-Llama-3.1-8B-FP8"
 DEFAULT_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.1-8B-Instruct"
 DEFAULT_SMALL_MODEL_NAME_FOR_TEST = "meta-llama/Llama-3.2-1B-Instruct"
+DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST = "Alibaba-NLP/gte-Qwen2-1.5B-instruct"
 DEFAULT_MOE_MODEL_NAME_FOR_TEST = "mistralai/Mixtral-8x7B-Instruct-v0.1"
 DEFAULT_MLA_MODEL_NAME_FOR_TEST = "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct"
 DEFAULT_MLA_FP8_MODEL_NAME_FOR_TEST = "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8"
@@ -8,11 +8,14 @@ import json
 import unittest
 from types import SimpleNamespace
 
+import torch
+
 import sglang as sgl
 from sglang.srt.hf_transformers_utils import get_tokenizer
 from sglang.test.few_shot_gsm8k_engine import run_eval
 from sglang.test.test_utils import (
     DEFAULT_MODEL_NAME_FOR_TEST,
+    DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST,
     DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
 )
@@ -133,6 +136,22 @@ class TestSRTEngine(unittest.TestCase):
         print(out2)
         assert out1 == out2, f"{out1} != {out2}"
 
+    def test_6_engine_runtime_encode_consistency(self):
+        prompt = "Today is a sunny day and I like"
+        model_path = DEFAULT_SMALL_EMBEDDING_MODEL_NAME_FOR_TEST
+
+        engine = sgl.Engine(
+            model_path=model_path, is_embedding=True, random_seed=42, log_level="error"
+        )
+        out1 = torch.tensor(engine.encode(prompt)["embedding"])
+        engine.shutdown()
+
+        runtime = sgl.Runtime(model_path=model_path, is_embedding=True, random_seed=42)
+        out2 = torch.tensor(json.loads(runtime.encode(prompt))["embedding"])
+        runtime.shutdown()
+
+        self.assertTrue(torch.allclose(out1, out2, atol=1e-5, rtol=1e-3))
+
 
 if __name__ == "__main__":
     unittest.main()