syntax = "proto3";

package generate.v1;
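
// Intended flow, as implied by the RPC and field comments below: a new Batch is
// sent to Prefill, which runs the prefill forward pass and returns one
// Generation per request together with a cached Batch; repeated Decode calls
// then take one or more cached batches and generate one more token per request.
// ClearCache empties the batch cache.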

service TextGenerationService {
    /// Service discovery
    rpc ServiceDiscovery (ServiceDiscoveryRequest) returns (ServiceDiscoveryResponse) {}
    /// Empties batch cache
    rpc ClearCache (ClearCacheRequest) returns (ClearCacheResponse);
    /// Prefill batch and decode first token
    rpc Prefill (PrefillRequest) returns (PrefillResponse);
    /// Decode one more token for each request in a list of prefilled batches
    rpc Decode (DecodeRequest) returns (DecodeResponse);
}
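
// Example of driving the service above from a client. This is a rough sketch
// only: it assumes Python stubs generated by grpcio-tools from this file
// (modules generate_pb2 / generate_pb2_grpc), an insecure channel on a
// placeholder address, that the server stops returning a cached batch once
// every request in it has finished, and that generated_text is only set on the
// Generation that finishes a request.
//
//   import grpc
//   import generate_pb2
//   import generate_pb2_grpc
//
//   channel = grpc.insecure_channel("localhost:50051")  # placeholder address
//   stub = generate_pb2_grpc.TextGenerationServiceStub(channel)
//
//   request = generate_pb2.Request(
//       id=0,
//       inputs="What is Deep Learning?",
//       input_length=6,  # number of tokens in `inputs` (tokenizer-dependent)
//       parameters=generate_pb2.NextTokenChooserParameters(
//           temperature=1.0, top_k=0, top_p=1.0, do_sample=False,
//       ),
//       stopping_parameters=generate_pb2.StoppingCriteriaParameters(
//           max_new_tokens=20,
//       ),
//   )
//   batch = generate_pb2.Batch(id=0, requests=[request], size=1)
//
//   # Prefill the new batch, then keep decoding the cached batch it returns.
//   response = stub.Prefill(generate_pb2.PrefillRequest(batch=batch))
//   finished = [g for g in response.generations if g.HasField("generated_text")]
//   while response.HasField("batch"):
//       response = stub.Decode(generate_pb2.DecodeRequest(batches=[response.batch]))
//       finished += [g for g in response.generations if g.HasField("generated_text")]
//
//   for g in finished:
//       print(g.generated_text.text, g.generated_text.finish_reason)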

/// Empty request
message ServiceDiscoveryRequest {}

message ServiceDiscoveryResponse {
    /// URLs of the other shards
    repeated string urls = 1;
}

/// Empty request
message ClearCacheRequest {}

/// Empty response
message ClearCacheResponse {}

message NextTokenChooserParameters {
    /// exponential scaling of the output probability distribution
    float temperature = 1;
    /// restricting to the k highest-probability tokens
    uint32 top_k = 2;
    /// restricting to the smallest set of tokens whose cumulative probability exceeds top_p (nucleus sampling)
    float top_p = 3;
    /// whether to sample from the logits instead of taking the most likely token
    bool do_sample = 4;
    /// random seed for sampling
    optional uint64 seed = 5;
}
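
// The parameters above describe the usual temperature / top-k / top-p (nucleus)
// sampling scheme. The sketch below (plain numpy, not code from this repository)
// shows one conventional way such parameters can turn a logits vector into the
// next token id; the server's actual implementation may differ.
//
//   import numpy as np
//
//   def choose_next_token(logits, p, rng):
//       # p: NextTokenChooserParameters, logits: 1-D array over the vocabulary
//       scores = logits / p.temperature          # exponential scaling of the distribution
//       if p.top_k > 0:                          # keep only the k highest-scoring tokens
//           cutoff = np.sort(scores)[-p.top_k]
//           scores = np.where(scores < cutoff, -np.inf, scores)
//       probs = np.exp(scores - scores.max())
//       probs /= probs.sum()
//       if 0.0 < p.top_p < 1.0:                  # nucleus: smallest set of tokens whose
//           order = np.argsort(probs)[::-1]      # cumulative probability exceeds top_p
//           keep = np.cumsum(probs[order]) - probs[order] < p.top_p
//           mask = np.zeros_like(probs, dtype=bool)
//           mask[order[keep]] = True
//           probs = np.where(mask, probs, 0.0)
//           probs /= probs.sum()
//       if p.do_sample:                          # sample, or fall back to greedy decoding
//           return int(rng.choice(len(probs), p=probs))
//       return int(np.argmax(probs))
//
// A seeded generator, e.g. np.random.default_rng(p.seed), can be passed as rng.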

message StoppingCriteriaParameters {
    /// Maximum number of generated tokens
    uint32 max_new_tokens = 1;
    /// Optional stopping sequences
    repeated string stop_sequences = 2;
}

message Request {
    /// Request ID
    uint64 id = 1;
    /// The generation context
    string inputs = 2;
    /// The number of tokens inside inputs
    uint32 input_length = 3;
    /// Next Token Chooser Parameters
    NextTokenChooserParameters parameters = 4;
    /// Stopping Criteria Parameters
    StoppingCriteriaParameters stopping_parameters = 5;
}

message Batch {
    /// Batch ID
    uint64 id = 1;
    /// Individual requests
    repeated Request requests = 2;
    /// Batch size (==len(requests))
    uint32 size = 3;
}

message GeneratedText {
    /// Output
    string text = 1;
    /// Number of generated tokens
    uint32 generated_tokens = 2;
    /// Finish reason
    string finish_reason = 3;
    /// Seed
    optional uint64 seed = 4;
}

message PrefillTokens {
    /// Prefill Token IDs
    repeated uint32 ids = 1;
    /// Prefill Logprobs
    repeated float logprobs = 2;
    /// Prefill tokens
    repeated string texts = 3;
}

message Generation {
    /// Request ID
    uint64 request_id = 1;
    /// Prefill tokens (optional)
    PrefillTokens prefill_tokens = 2;
    /// Token ID
    uint32 token_id = 3;
    /// Logprob
    float token_logprob = 4;
    /// Text
    string token_text = 5;
    /// Complete generated text
    GeneratedText generated_text = 6;
}

message PrefillRequest {
    /// Batch
    Batch batch = 1;
}

message PrefillResponse {
    /// Generation
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional Batch batch = 2;
}

message DecodeRequest {
    /// Cached batches
    repeated Batch batches = 1;
}

message DecodeResponse {
    /// Decodes
    repeated Generation generations = 1;
    /// Next batch (cached)
    optional Batch batch = 2;
}