Unverified commit eb8f02dd authored by Sai Enduri, committed by GitHub

Update nightly thresholds and dependencies. (#6635)

parent 0ca3e568
@@ -13,3 +13,5 @@ docker exec -w /human-eval ci_sglang pip install -e .
 docker exec -w / ci_sglang mkdir -p /dummy-grok
 mkdir -p dummy-grok && wget https://sharkpublic.blob.core.windows.net/sharkpublic/sglang/dummy_grok.json -O dummy-grok/config.json
 docker cp ./dummy-grok ci_sglang:/
+docker exec ci_sglang pip install huggingface_hub[hf_xet]
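Note on the added dependency: the hf_xet extra pulls in the Xet storage client so huggingface_hub can fetch files from Xet-backed model repos inside the CI container. Below is a minimal sketch of the download path the nightly models take, assuming the standard huggingface_hub API; the repo id is illustrative, not something this commit adds.

```python
# Minimal sketch, assuming the standard huggingface_hub download API.
# With huggingface_hub[hf_xet] installed, Xet-backed repos are fetched through
# the chunk-deduplicated backend transparently; the call itself is unchanged.
from huggingface_hub import snapshot_download

# Illustrative repo id taken from the threshold table below.
local_dir = snapshot_download(repo_id="meta-llama/Llama-3.1-8B-Instruct")
print(local_dir)
```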
@@ -21,24 +21,24 @@ from sglang.test.test_utils import (
 MODEL_SCORE_THRESHOLDS = {
     "meta-llama/Llama-3.1-8B-Instruct": 0.82,
-    "mistralai/Mistral-7B-Instruct-v0.3": 0.56,
+    "mistralai/Mistral-7B-Instruct-v0.3": 0.58,
     "deepseek-ai/DeepSeek-Coder-V2-Lite-Instruct": 0.85,
     "meta-llama/Llama-3.1-70B-Instruct": 0.95,
-    "mistralai/Mixtral-8x7B-Instruct-v0.1": 0.62,
+    "mistralai/Mixtral-8x7B-Instruct-v0.1": 0.64,
     "Qwen/Qwen2-57B-A14B-Instruct": 0.86,
-    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8": 0.81,
+    "neuralmagic/Meta-Llama-3.1-8B-Instruct-FP8": 0.82,
     "neuralmagic/Mistral-7B-Instruct-v0.3-FP8": 0.54,
-    "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8": 0.93,
+    "neuralmagic/Meta-Llama-3.1-70B-Instruct-FP8": 0.94,
-    "neuralmagic/Qwen2-72B-Instruct-FP8": 0.93,
+    "neuralmagic/Qwen2-72B-Instruct-FP8": 0.94,
-    "neuralmagic/Qwen2-57B-A14B-Instruct-FP8": 0.82,
+    "neuralmagic/Qwen2-57B-A14B-Instruct-FP8": 0.86,
+    "neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8": 0.61,
+    "google/gemma-2-27b-it": 0.91,
 }
 # Models currently failing on AMD MI300x.
 failing_models = {
-    "google/gemma-2-27b-it",
     "neuralmagic/DeepSeek-Coder-V2-Lite-Instruct-FP8",
     "neuralmagic/gemma-2-2b-it-FP8",
-    "neuralmagic/Mixtral-8x7B-Instruct-v0.1-FP8",
 }
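For context on how these values gate the nightly run: a hedged sketch of the comparison step, assuming the test collects a per-model accuracy dict. The check_scores helper and its arguments are hypothetical, not the repository's actual test code.

```python
# Hypothetical helper (not the repository's test code): flag regressions when a
# measured score drops below its threshold, skipping models that are listed as
# known failures on the current hardware (e.g. AMD MI300x).
def check_scores(
    measured: dict[str, float],
    thresholds: dict[str, float],
    failing: set[str],
) -> list[str]:
    failures = []
    for model, threshold in thresholds.items():
        if model in failing:
            continue  # known failure on this hardware; do not gate the run on it
        score = measured.get(model)
        if score is None or score < threshold:
            failures.append(f"{model}: got {score}, expected >= {threshold}")
    return failures


# Example: raising a threshold from 0.56 to 0.58 means a measured 0.57 now
# counts as a regression, where it previously passed.
assert check_scores({"m": 0.57}, {"m": 0.58}, set()) == ["m: got 0.57, expected >= 0.58"]
```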