// Protocol definitions for the TextGenerationService gRPC API (generate.v2).
syntax = "proto3";

package generate.v2;
/// RPCs exposed by a model shard.
service TextGenerationService {
    /// Model Info
    rpc Info (InfoRequest) returns (InfoResponse) {}
    /// Service discovery
    rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {}
    /// Empties batch cache
    rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
    /// Remove requests from a cached batch
    rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse);
    /// Warmup the model and compute max cache size
    rpc Warmup (WarmupRequest) returns (WarmupResponse);
    /// Prefill batch and decode first token
    rpc Prefill (PrefillRequest) returns (PrefillResponse);
    /// Decode token for a list of prefilled batches
    rpc Decode (DecodeRequest) returns (DecodeResponse);
    /// Health check
    rpc Health (HealthRequest) returns (HealthResponse);
}

/// Empty request; liveness probe payload.
message HealthRequest {}
/// Empty response; receiving a reply indicates the service is up.
message HealthResponse {}

/// Empty request
message InfoRequest {}

/// Static properties of the model reported by Info.
message InfoResponse {
    /// Whether inputs must be padded before batching
    bool requires_padding = 1;
    /// Model weights dtype
    string dtype = 2;
    /// Device the model runs on
    string device_type = 3;
    /// Attention window size, when the model uses a sliding window
    optional uint32 window_size = 4;
    /// Number of speculative tokens per decoding step
    uint32 speculate = 5;
}

/// Empty request
message ServiceDiscoveryRequest {}

message ServiceDiscoveryResponse {
    /// Other shards urls
    repeated string urls = 1;
}

message ClearCacheRequest {
    /// Optional batch id
    /// NOTE(review): presumably all cached batches are cleared when unset — confirm
    optional uint64 id = 1;
}
/// Empty response
message ClearCacheResponse {}

/// An image passed as part of a multimodal input.
message Image {
    /// Binary image data.
    bytes data = 1;

    /// Image MIME type.
    string mimetype = 2;
}

/// One piece of a generation context: either text or an image.
message InputChunk {
    oneof chunk {
        /// Plain text data
        string text = 1;
        /// Image data
        Image image = 2;
    }
}

/// An ordered sequence of input chunks forming one generation context.
message Input {
    repeated InputChunk chunks = 1;
}

/// Kind of grammar constraint applied during generation.
enum GrammarType {
    /// No grammar constraint
    GRAMMAR_TYPE_NONE = 0;
    /// The grammar string is a JSON schema
    GRAMMAR_TYPE_JSON = 1;
    /// The grammar string is a regular expression
    GRAMMAR_TYPE_REGEX = 2;
}

/// Sampling / logit-processing parameters for choosing the next token.
/// NOTE: fields 8 and 9 are declared out of numeric order; the numbers are
/// the wire contract — do not renumber.
message NextTokenChooserParameters {
    /// exponential scaling output probability distribution
    float temperature = 1;
    /// restricting to the k highest probability elements
    uint32 top_k = 2;
    /// restricting to top tokens summing to prob_cut_off <= prob_cut_off
    float top_p = 3;
    /// typical decoding mass cutoff (typical sampling)
    float typical_p = 4;
    /// apply sampling on the logits
    bool do_sample = 5;
    /// random seed for sampling
    uint64 seed = 6;
    /// repetition penalty
    float repetition_penalty = 7;
    /// frequency penalty
    float frequency_penalty = 9;
    /// token watermarking using "A Watermark for Large Language Models"
    bool watermark = 8;
    /// grammar (applied if not empty)
    string grammar = 10;
    /// grammar type
    GrammarType grammar_type = 11;
}

/// Criteria deciding when a request stops generating.
message StoppingCriteriaParameters {
    /// Maximum number of generated tokens
    uint32 max_new_tokens = 1;
    /// Optional stopping sequences
    repeated string stop_sequences = 2;
    /// Ignore end of sequence token
    /// used for benchmarking
    bool ignore_eos_token = 3;
}

/// A single generation request inside a Batch.
message Request {
    /// Request ID
    uint64 id = 1;
    /// The generation context as chunks
    Input input_chunks = 8;
    /// The generation context, stringified input_chunks
    string inputs = 2;
    /// Context truncation
    uint32 truncate = 3;
    /// Next Token Chooser Parameters
    NextTokenChooserParameters parameters = 4;
    /// Stopping Criteria Parameters
    StoppingCriteriaParameters stopping_parameters = 5;
    /// Return prefill logprobs
    bool prefill_logprobs = 6;
    /// Return most likely n tokens
    uint32 top_n_tokens = 7;
}

/// A batch of requests submitted for prefill.
message Batch {
    /// Batch ID
    uint64 id = 1;
    /// Individual requests
    repeated Request requests = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
    /// Maximum number of tokens this batch will grow to
    uint32 max_tokens = 4;
}

/// A lightweight reference to a batch already cached on the shard.
message CachedBatch {
    /// Batch ID
    uint64 id = 1;
    /// Individual requests ids
    repeated uint64 request_ids = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
    /// Maximum number of tokens this batch will grow to
    uint32 max_tokens = 4;
}

/// Why generation stopped for a request.
/// NOTE(review): there is no *_UNSPECIFIED zero value, so FINISH_REASON_LENGTH
/// is the implicit default; changing this would break the wire contract.
enum FinishReason {
    /// Hit the max_new_tokens limit
    FINISH_REASON_LENGTH = 0;
    /// Produced the end-of-sequence token
    FINISH_REASON_EOS_TOKEN = 1;
    /// Matched one of the stop sequences
    FINISH_REASON_STOP_SEQUENCE = 2;
}

/// Final output of a finished request.
message GeneratedText {
    /// Output
    string text = 1;
    /// Number of generated tokens
    uint32 generated_tokens = 2;
    /// Finish reason
    FinishReason finish_reason = 3;
    /// Seed
    optional uint64 seed = 4;
}

/// A run of tokens.
/// NOTE(review): the four repeated fields look like parallel arrays (entry i
/// of each describes token i) — confirm against the shard implementation.
message Tokens {
    /// Token IDs
    repeated uint32 ids = 1;
    /// Logprobs
    repeated float logprobs = 2;
    /// tokens
    repeated string texts = 3;
    /// special
    repeated bool is_special = 4;
}

/// Per-request output of one Prefill/Decode step.
message Generation {
    /// Request ID
    uint64 request_id = 1;
    /// Prefill tokens (optional)
    Tokens prefill_tokens = 2;
    /// Tokens produced by this step
    Tokens tokens = 3;
    /// Complete generated text
    optional GeneratedText generated_text = 4;
    /// Top tokens
    repeated Tokens top_tokens = 5;
}

message FilterBatchRequest {
    /// Batch ID
    uint64 batch_id = 1;
    /// Requests to keep
    repeated uint64 request_ids = 2;
}

message FilterBatchResponse {
    /// Filtered Batch (cached)
    CachedBatch batch = 1;
}


message PrefillRequest {
    /// Batch
    Batch batch = 1;
}

message PrefillResponse {
    /// Generation
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional CachedBatch batch = 2;
    /// Forward elapsed time in nanoseconds
    uint64 forward_ns = 3;
    /// Decode elapsed time in nanoseconds
    uint64 decode_ns = 4;
    /// Total elapsed time in nanoseconds
    uint64 total_ns = 5;
}

message DecodeRequest {
    /// Cached batches
    repeated CachedBatch batches = 1;
}
message DecodeResponse {
    /// Decodes
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional CachedBatch batch = 2;
    /// Forward elapsed time in nanoseconds
    uint64 forward_ns = 3;
    /// Decode elapsed time in nanoseconds
    uint64 decode_ns = 4;
    /// Total elapsed time in nanoseconds
    uint64 total_ns = 5;
    /// Concatenate elapsed time in nanoseconds
    optional uint64 concat_ns = 6;
}
message WarmupRequest {
    /// Batch to warmup on
    Batch batch = 1;
    /// Maximum input length, in tokens
    uint32 max_input_length = 2;
    /// Maximum number of prefill tokens
    uint32 max_prefill_tokens = 3;
    /// Maximum total tokens (input + generated)
    uint32 max_total_tokens = 4;
}

message WarmupResponse {
    /// Maximum number of tokens supported by the model
    optional uint32 max_supported_total_tokens = 1;
}