Unverified Commit eec3f6d1 authored by Chang Su's avatar Chang Su Committed by GitHub
Browse files

[Bugfix] Fix tokenizer_manager not getting 400 when req is too long (#3678)

Co-authored-by: voidxb <unknown>
parent 90bc26a8
...@@ -683,6 +683,8 @@ class Scheduler: ...@@ -683,6 +683,8 @@ class Scheduler:
self.server_args.allow_auto_truncate, self.server_args.allow_auto_truncate,
) )
if error_msg: if error_msg:
req.origin_input_ids = [0]
req.sampling_params.max_new_tokens = 0
self.waiting_queue.append(req) self.waiting_queue.append(req)
return return
......
...@@ -23,17 +23,33 @@ class TestRequestLengthValidation(unittest.TestCase): ...@@ -23,17 +23,33 @@ class TestRequestLengthValidation(unittest.TestCase):
cls.base_url, cls.base_url,
timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH, timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
api_key=cls.api_key, api_key=cls.api_key,
other_args=("--max-total-tokens", "1000", "--context-length", "100"), other_args=("--max-total-tokens", "1000", "--context-length", "1000"),
) )
@classmethod @classmethod
def tearDownClass(cls): def tearDownClass(cls):
kill_process_tree(cls.process.pid) kill_process_tree(cls.process.pid)
def test_input_length_longer_than_context_length(self):
    """Verify the server rejects a prompt longer than --context-length.

    The server is launched with ``--context-length 1000``; a prompt of
    ~1200 repeated words tokenizes past that limit, so the request must
    fail fast with HTTP 400 (openai.BadRequestError) rather than being
    scheduled, and the error text must name the context-length cause.
    """
    client = openai.Client(api_key=self.api_key, base_url=f"{self.base_url}/v1")

    # NOTE(review): assumes "hello " repeated 1200x exceeds 1000 tokens
    # for the test model's tokenizer — holds for typical BPE vocabularies.
    long_text = "hello " * 1200

    with self.assertRaises(openai.BadRequestError) as cm:
        client.chat.completions.create(
            model=DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
            messages=[
                {"role": "user", "content": long_text},
            ],
            temperature=0,
        )

    self.assertIn("is longer than the model's context length", str(cm.exception))
def test_input_length_longer_than_maximum_allowed_length(self):
client = openai.Client(api_key=self.api_key, base_url=f"{self.base_url}/v1")
long_text = "hello " * 999 # the maximum allowed length is 994 tokens
with self.assertRaises(openai.BadRequestError) as cm: with self.assertRaises(openai.BadRequestError) as cm:
client.chat.completions.create( client.chat.completions.create(
...@@ -58,7 +74,7 @@ class TestRequestLengthValidation(unittest.TestCase): ...@@ -58,7 +74,7 @@ class TestRequestLengthValidation(unittest.TestCase):
{"role": "user", "content": long_text}, {"role": "user", "content": long_text},
], ],
temperature=0, temperature=0,
max_tokens=500, max_tokens=1200,
) )
self.assertIn( self.assertIn(
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment