syntax = "proto3";

package generate.v1;

service TextGenerationService {
    /// Model Info
    rpc Info (InfoRequest) returns (InfoResponse) {}
    /// Service discovery
    rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {}
    /// Empties batch cache
    rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
    /// Remove requests from a cached batch
    rpc FilterBatch (FilterBatchRequest) returns (FilterBatchResponse);
    /// Warmup the model and compute max cache size
    rpc Warmup (WarmupRequest) returns (WarmupResponse);
    /// Prefill batch and decode first token
    rpc Prefill (PrefillRequest) returns (PrefillResponse);
    /// Decode token for a list of prefilled batches
    rpc Decode (DecodeRequest) returns (DecodeResponse);
    /// Health check
    rpc Health (HealthRequest) returns (HealthResponse);
}

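/// Empty request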
message HealthRequest {}
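/// Empty response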
message HealthResponse {}

/// Empty request
message InfoRequest {}

message InfoResponse {
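    /// Whether the model requires padded inputs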
    bool requires_padding = 1;
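    /// Model dtype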
    string dtype = 2;
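    /// Model device type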
    string device_type = 3;
}

/// Empty request
message ServiceDiscoveryRequest {}

message ServiceDiscoveryResponse {
    /// Other shard URLs
    repeated string urls = 1;
}

message ClearCacheRequest {
    /// Optional batch id
    optional uint64 id = 1;
}

/// Empty response
message ClearCacheResponse {}

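/// Parameters used to select the next token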
message NextTokenChooserParameters {
    /// exponential scaling of the output probability distribution
    float temperature = 1;
    /// restricting to the k highest probability elements
    uint32 top_k = 2;
    /// restricting to top tokens summing to at most top_p
    float top_p = 3;
    /// restricting to locally typical tokens summing to at most typical_p
    float typical_p = 4;
    /// apply sampling on the logits
    bool do_sample = 5;
    /// random seed for sampling
    uint64 seed = 6;
    /// repetition penalty
    float repetition_penalty = 7;
    /// token watermarking using "A Watermark for Large Language Models"
    bool watermark = 8;
}

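/// Criteria used to decide when generation stops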
message StoppingCriteriaParameters {
    /// Maximum number of generated tokens
    uint32 max_new_tokens = 1;
    /// Optional stopping sequences
    repeated string stop_sequences = 2;
    /// Ignore end of sequence token
    /// used for benchmarking
    bool ignore_eos_token = 3;
}

message Request {
    /// Request ID
    uint64 id = 1;
    /// The generation context
    string inputs = 2;
    /// Context truncation
    uint32 truncate = 3;
    /// Next Token Chooser Parameters
    NextTokenChooserParameters parameters = 4;
    /// Stopping Criteria Parameters
    StoppingCriteriaParameters stopping_parameters = 5;
    /// Return prefill logprobs
    bool prefill_logprobs = 6;
    /// Return most likely n tokens
    uint32 top_n_tokens = 7;
}

message Batch {
    /// Batch ID
    uint64 id = 1;
    /// Individual requests
    repeated Request requests = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
    /// Maximum number of tokens this batch will grow to
    uint32 max_tokens = 4;
}

message CachedBatch {
    /// Batch ID
    uint64 id = 1;
    /// Individual request IDs
    repeated uint64 request_ids = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
    /// Maximum number of tokens this batch will grow to
    uint32 max_tokens = 4;
}

enum FinishReason {
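    /// Generation reached max_new_tokens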
    FINISH_REASON_LENGTH = 0;
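    /// The model generated its end of sequence token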
    FINISH_REASON_EOS_TOKEN = 1;
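    /// The generated text matched one of the stop_sequences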
    FINISH_REASON_STOP_SEQUENCE = 2;
}

message GeneratedText {
    /// Output
    string text = 1;
    /// Number of generated tokens
    uint32 generated_tokens = 2;
    /// Finish reason
    FinishReason finish_reason = 3;
    /// Seed
    optional uint64 seed = 4;
}

message PrefillTokens {
    /// Prefill Token IDs
    repeated uint32 ids = 1;
    /// Prefill Logprobs
    repeated float logprobs = 2;
    /// Prefill tokens
    repeated string texts = 3;
}

message TopTokens {
    /// Top Token IDs
    repeated uint32 ids = 1;
    /// Top Logprobs
    repeated float logprobs = 2;
    /// Top Token Texts
    repeated string texts = 3;
    /// If the tokens are special
    repeated bool is_special = 6;
}

message Generation {
    /// Request ID
    uint64 request_id = 1;
    /// Prefill tokens (optional)
    PrefillTokens prefill_tokens = 2;
    /// Token ID
    uint32 token_id = 3;
    /// Logprob
    float token_logprob = 4;
    /// Text
    string token_text = 5;
    /// Is it a special token
    bool token_is_special = 6;
    /// Complete generated text
    optional GeneratedText generated_text = 7;
    /// Top tokens
    TopTokens top_tokens = 8;
}

message FilterBatchRequest {
    /// Batch ID
    uint64 batch_id = 1;
    /// Requests to keep
    repeated uint64 request_ids = 2;
}

message FilterBatchResponse {
    /// Filtered Batch (cached)
    CachedBatch batch = 1;
}


message PrefillRequest {
    /// Batch
    Batch batch = 1;
}

message PrefillResponse {
    /// Generations
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional CachedBatch batch = 2;
}

message DecodeRequest {
    /// Cached batches
    repeated CachedBatch batches = 1;
}

message DecodeResponse {
    /// Generations
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional CachedBatch batch = 2;
}

message WarmupRequest {
    /// Batch to warmup on
    Batch batch = 1;
}

/// Warmup response
message WarmupResponse {
    /// Maximum number of tokens supported by the model
    optional uint32 max_supported_total_tokens = 1;
}