"""Integration tests: the server must reject over-long requests (no auto truncation)."""

import unittest

import openai

from sglang.srt.utils import kill_process_tree
from sglang.test.test_utils import (
    DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
    DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
    DEFAULT_URL_FOR_TEST,
    popen_launch_server,
)


class TestRequestLengthValidation(unittest.TestCase):
    """Requests exceeding the context window should raise openai.BadRequestError."""

    @classmethod
    def setUpClass(cls):
        cls.base_url = DEFAULT_URL_FOR_TEST
        cls.api_key = "sk-123456"

        # Launch a server with a deliberately tiny context window (100 tokens)
        # and auto truncation disabled, so oversized requests must be rejected.
        cls.process = popen_launch_server(
            DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
            cls.base_url,
            timeout=DEFAULT_TIMEOUT_FOR_SERVER_LAUNCH,
            api_key=cls.api_key,
            other_args=("--max-total-tokens", "1000", "--context-length", "100"),
        )

    @classmethod
    def tearDownClass(cls):
        # Tear down the whole server process tree started in setUpClass.
        kill_process_tree(cls.process.pid)

    def _make_client(self):
        # Every test talks to the same locally launched OpenAI-compatible endpoint.
        return openai.Client(api_key=self.api_key, base_url=self.base_url + "/v1")

    def test_input_length_validation(self):
        """An input prompt longer than the context window is rejected up front."""
        client = self._make_client()
        # 100 repetitions tokenize well past the 100-token context window.
        oversized_prompt = "hello " * 100

        with self.assertRaises(openai.BadRequestError) as ctx:
            client.chat.completions.create(
                model=DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
                messages=[{"role": "user", "content": oversized_prompt}],
                temperature=0,
            )

        self.assertIn("is longer than the model's context length", str(ctx.exception))

    def test_max_tokens_validation(self):
        """A tiny prompt plus an oversized max_tokens request is rejected."""
        client = self._make_client()
        short_prompt = "hello "

        with self.assertRaises(openai.BadRequestError) as ctx:
            # The prompt itself is small, but max_tokens=500 pushes the
            # requested total past the 100-token context limit.
            client.chat.completions.create(
                model=DEFAULT_SMALL_MODEL_NAME_FOR_TEST,
                messages=[{"role": "user", "content": short_prompt}],
                temperature=0,
                max_tokens=500,
            )

        self.assertIn(
            "Requested token count exceeds the model's maximum context",
            str(ctx.exception),
        )


if __name__ == "__main__":
    unittest.main()