//
// Created by Morgan Funtowicz on 6/30/24.
//

#ifndef TGI_TRTLLM_BACKEND_H
#define TGI_TRTLLM_BACKEND_H

#include <array>
#include <cmath>
#include <cstdint>
#include <filesystem>
#include <list>
#include <optional>
#include <span>
#include <string>
#include <vector>

#include <nlohmann/json.hpp>

#include <tensorrt_llm/runtime/common.h>
#include <tensorrt_llm/executor/executor.h>
#include <tensorrt_llm/plugins/api/tllmPlugin.h>

// Shorthand aliases for the third-party namespaces used throughout this header.
using json = nlohmann::json;
namespace tle = tensorrt_llm::executor;


// Narrowing conversion helper to the executor's size type.
// NOTE(review): values outside tle::SizeType32's range are silently narrowed — confirm callers pass bounded values.
#define CAST_SIZETYPE(x) static_cast<tle::SizeType32>(x)

namespace huggingface::tgi::backends {
    using RequestId = tle::IdType;
    using TokenId = tle::TokenIdType;

30
    const static auto OUTPUT_CONFIG = tle::OutputConfig(true, false, false, true, false);
31
32
    constexpr auto FMT_NOT_ENOUGH_GPUS = FMT_STRING(
            "Not enough GPUs to allocate requested model (detected: {:d}, required: {:d})");
33
34
35
36
37
    constexpr auto FMT_EXECUTOR_STATS = FMT_STRING(
            "Submitting inference [{}] to the executor ({:d} already in-flight)");
    constexpr auto FMT_SAMPLING_CONFIG = FMT_STRING(
            "Sampling: topK={:d}, topP={:.1f}, temperature={:.1f}, repetition_penalty={:.1f}, frequency_penalty={:.1f}, seed={:d}");

    /**
     * Initialize all the components required by TRTLLM.
     * It is required to call this function before attempting to load any engine.
     */
    void InitializeBackend();

    /**
     * Initialize the logging mechanism used by the backend.
     * NOTE(review): ordering relative to InitializeBackend() is not expressed
     * here — confirm against the implementation whether one calls the other.
     */
    void InitializeLogging();


    /**
     * Build the executor configuration from the model configuration.
     * @param config TensorRT-LLM configuration object
     * @param workerPath Path to the "executorWorker" provided by TensorRT-LLM when using orchestrator mode
     * @return Executor configuration ready to be handed to a tle::Executor
     */
    tle::ExecutorConfig GetExecutorConfig(const json &config, const std::string &workerPath);

    /**
     * Build the parallel (multi-rank) configuration for the executor.
     * @param worldSize Number of ranks the engine was built for
     * @param workerPath Path to the "executorWorker" binary used in orchestrator mode
     * @return Parallel configuration matching the requested world size
     */
    tle::ParallelConfig GetParallelConfig(size_t worldSize, std::string workerPath) noexcept;

    /**
     * Get the sampling configuration from the parameters provided by TGI.
     * @param topK Number of highest-probability tokens kept for sampling
     * @param topP Cumulative-probability (nucleus) sampling threshold
     * @param temperature Logits temperature
     * @param repetition_penalty Penalty applied to already-generated tokens
     * @param frequency_penalty Penalty proportional to token frequency
     * @param seed Seed of the pseudo-random generator
     * @return Sampling configuration to attach to an executor request
     */
    tle::SamplingConfig GetSamplingConfig(
            uint32_t topK,
            float_t topP,
            float_t temperature,
            float_t repetition_penalty,
            float_t frequency_penalty,
            uint64_t seed
    ) noexcept;
    /**
     * Attempt to retrieve the stop words (as sequences of token ids) declared
     * in the model's generation configuration file, if any.
     * @param generationConfigPath Path to the generation configuration file
     *        (presumably generation_config.json — confirm against the caller)
     * @return The stop-word token sequences, or std::nullopt when none could
     *         be read from the configuration
     */
    std::optional<std::list<std::vector<TokenId>>>
    GetStopWordsFromConfig(const std::filesystem::path &generationConfigPath) noexcept;

    /**
     *
     */
    class TensorRtLlmBackend {
    private:
        const json config;
        tle::Executor executor;

101
102
        /** Frequently accessed variables cached here **/
        uint32_t maxNumTokens;
103
        std::list<std::vector<TokenId>> stopWords;
104

Nicolas Patry's avatar
Nicolas Patry committed
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
    public:
        explicit TensorRtLlmBackend(
                const std::filesystem::path &engineFolder,
                const std::filesystem::path &executorWorker
        );

        /**
         * Query the executor for the number of token available for pulling
         * @return
         */
        [[nodiscard]] size_t NumResponsesReady() const;

        /**
         * Submit a new generation task to the executor
         * @param tokens
         * @param topK
         * @param topP
         * @param temperature
123
124
         * @param repetitionPenalty
         * @param frequencyPenalty
Nicolas Patry's avatar
Nicolas Patry committed
125
126
127
128
129
         * @param seed
         * @return Request id related to this generation for reference
         */
        [[nodiscard]] RequestId Submit(
                const std::vector<TokenId> &tokens,
130
131
132
133
134
135
136
                uint32_t maxNewTokens,
                int32_t topK,
                float_t topP,
                float_t temperature,
                float_t repetitionPenalty,
                float_t frequencyPenalty,
                uint64_t seed
Nicolas Patry's avatar
Nicolas Patry committed
137
138
        );

139
        [[nodiscard]] std::vector<tle::Response> PullNewTokens();
Nicolas Patry's avatar
Nicolas Patry committed
140
141
142
143
144
    };
}


#endif //TGI_TRTLLM_BACKEND_H