# SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

Common:
  model: neuralmagic/DeepSeek-R1-Distill-Llama-70B-FP8-dynamic
  kv-transfer-config: '{"kv_connector":"DynamoNixlConnector"}'
  router: round-robin
  # Number of tokens per KV cache block; larger blocks allow more efficient
  # chunked KV transfers between GPUs.
  block-size: 128
  max-model-len: 3500
  max-num-batched-tokens: 3500
  gpu-memory-utilization: 0.95
  disable-log-requests: true

Frontend:
  # This model was chosen for its 70B size, which the TP and DP configurations
  # below are tuned for, and its FP8 precision, which reduces model and KV
  # cache memory usage and eases remote cache transfer.
  served_model_name: neuralmagic/DeepSeek-R1-Distill-Llama-70B-FP8-dynamic
  endpoint: dynamo.Processor.chat/completions
  port: 8000

Processor:
  common-configs: [model, router]

# One process with 4 GPUs generating output tokens (the "decode" phase).
VllmWorker:
  common-configs: [model, kv-transfer-config, router, block-size, max-model-len, gpu-memory-utilization, disable-log-requests]
  # Enable prefill at remote (prefill) workers.
  remote-prefill: true
  # Disable local prefill so only disaggregated prefill is used.
  conditional-disagg: false
  tensor-parallel-size: 4
  ServiceArgs:
    workers: 1
    resources:
      gpu: 4

# Four processes, each with 1 GPU, handling the initial prefill (context
# encoding) phase.
PrefillWorker:
  common-configs: [model, kv-transfer-config, block-size, max-model-len, max-num-batched-tokens, gpu-memory-utilization, disable-log-requests]
  tensor-parallel-size: 1
  ServiceArgs:
    workers: 4
    resources:
      gpu: 1

# Automatic prefix caching is disabled by default, since all requests are
# expected to be unique.
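
# Example request (a minimal sketch): assuming the Frontend exposes an
# OpenAI-compatible /v1/chat/completions route on the port configured above,
# a deployment of this file could be exercised with:
#
#   curl http://localhost:8000/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{
#           "model": "neuralmagic/DeepSeek-R1-Distill-Llama-70B-FP8-dynamic",
#           "messages": [{"role": "user", "content": "Hello"}],
#           "max_tokens": 64
#         }'
#
# The prompt is prefilled by one of the four PrefillWorkers, whose KV cache is
# then transferred over NIXL to the 4-GPU VllmWorker for decoding.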