// SPDX-FileCopyrightText: Copyright (c) 2025 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
// SPDX-License-Identifier: Apache-2.0
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use triton_distributed_llm::{
    backend::Backend,
    http::service::discovery::ModelEntry,
    model_type::ModelType,
    preprocessor::OpenAIPreprocessor,
    types::{
        openai::chat_completions::{ChatCompletionRequest, ChatCompletionResponseDelta},
        Annotated,
    },
};
use triton_distributed_runtime::pipeline::{
    network::Ingress, ManyOut, Operator, SegmentSource, ServiceBackend, SingleIn, Source,
};
use triton_distributed_runtime::{protocols::Endpoint, DistributedRuntime, Runtime};

use crate::EngineConfig;
pub async fn run(
    runtime: Runtime,
    path: String,
    engine_config: EngineConfig,
) -> anyhow::Result<()> {
    // This will attempt to connect to NATS and etcd
    let distributed = DistributedRuntime::from_settings(runtime.clone()).await?;

41
    let cancel_token = runtime.primary_token().clone();
42
    let endpoint: Endpoint = path.parse()?;
43
44
45
46

    let etcd_client = distributed.etcd_client();

    let (ingress, service_name) = match engine_config {
47
48
49
        EngineConfig::StaticFull {
            service_name,
            engine,
50
51
52
53
54
        } => (Ingress::for_engine(engine)?, service_name),
        EngineConfig::StaticCore {
            service_name,
            engine: inner_engine,
            card,
55
        } => {
56
57
58
59
60
61
62
63
64
            let frontend = SegmentSource::<
                SingleIn<ChatCompletionRequest>,
                ManyOut<Annotated<ChatCompletionResponseDelta>>,
            >::new();
            let preprocessor = OpenAIPreprocessor::new(*card.clone())
                .await?
                .into_operator();
            let backend = Backend::from_mdc(*card.clone()).await?.into_operator();
            let engine = ServiceBackend::from_engine(inner_engine);
65

66
67
68
69
70
71
72
            let pipeline = frontend
                .link(preprocessor.forward_edge())?
                .link(backend.forward_edge())?
                .link(engine)?
                .link(backend.backward_edge())?
                .link(preprocessor.backward_edge())?
                .link(frontend)?;
73

74
            (Ingress::for_pipeline(pipeline)?, service_name)
75
76
77
78
        }
        EngineConfig::Dynamic(_) => {
            anyhow::bail!("Cannot use endpoint for both in and out");
        }
79
80
81
82
    };

    let model_registration = ModelEntry {
        name: service_name.to_string(),
83
        endpoint: endpoint.clone(),
84
        model_type: ModelType::Chat,
85
86
87
88
89
90
91
92
93
94
    };
    etcd_client
        .kv_create(
            path.clone(),
            serde_json::to_vec_pretty(&model_registration)?,
            None,
        )
        .await?;

    let rt_fut = distributed
95
96
        .namespace(endpoint.namespace)?
        .component(endpoint.component)?
97
98
99
        .service_builder()
        .create()
        .await?
100
        .endpoint(endpoint.name)
101
102
103
104
105
106
107
108
109
110
        .endpoint_builder()
        .handler(ingress)
        .start();

    tokio::select! {
        _ = rt_fut => {
            tracing::debug!("Endpoint ingress ended");
        }
        _ = cancel_token.cancelled() => {
        }
111
    }
112
    Ok(())
113
}