"vscode:/vscode.git/clone" did not exist on "c951da70965981cedcd58827d53eec91d5167e44"
Unverified Commit 8ce830a8 authored by Chang Su, committed by GitHub

[router][bugfix] Fix input_logprobs handling with None value and `logprob_start_len = -1` (#11113)
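Context for the fix: the servicer previously coerced the field with `grpc_req.logprob_start_len or -1`. Python's `or` falls through on every falsy value, including a legitimate `0`, so a request that explicitly set `logprob_start_len = 0` was silently rewritten to `-1`; the request manager also indexed into input-logprob lists that are `None` whenever input logprobs were never computed. A standalone sketch of the falsy-zero pitfall (illustration only, not the production request classes):

```python
# Why `x or -1` is the wrong default for an optional integer field.
def coerce_buggy(logprob_start_len):
    # `or` substitutes the default for ANY falsy value, including a valid 0
    return logprob_start_len or -1

def coerce_fixed(logprob_start_len):
    # Substitute the default only when the value is actually absent
    return logprob_start_len if logprob_start_len is not None else -1

assert coerce_buggy(None) == -1
assert coerce_buggy(0) == -1   # bug: a requested start of 0 becomes -1 (disabled)
assert coerce_fixed(0) == 0    # fix: 0 is preserved
assert coerce_fixed(None) == -1
```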

parent fb367acf
@@ -486,6 +486,56 @@ class GrpcRequestManager:
             if self.gracefully_exit:
                 break

+    def _convert_logprob_style(
+        self,
+        state: GrpcReqState,
+        batch_out: BatchTokenIDOut,
+        batch_index: int,
+    ):
+        """
+        Convert and accumulate logprobs from batch output to state.
+        Follows the same logic as tokenizer_manager.convert_logprob_style.
+        """
+        # Early exit if no input logprobs at all
+        if batch_out.input_token_logprobs_val is None:
+            return
+
+        # Accumulate input token logprobs (only if list is non-empty)
+        if len(batch_out.input_token_logprobs_val) > 0:
+            state.input_token_logprobs_val.extend(
+                batch_out.input_token_logprobs_val[batch_index]
+            )
+            state.input_token_logprobs_idx.extend(
+                batch_out.input_token_logprobs_idx[batch_index]
+            )
+
+        # Always accumulate output token logprobs
+        state.output_token_logprobs_val.extend(
+            batch_out.output_token_logprobs_val[batch_index]
+        )
+        state.output_token_logprobs_idx.extend(
+            batch_out.output_token_logprobs_idx[batch_index]
+        )
+
+        # Handle top logprobs if requested
+        if state.obj.top_logprobs_num > 0:
+            # Accumulate input top logprobs (only if list is non-empty)
+            if len(batch_out.input_top_logprobs_val) > 0:
+                state.input_top_logprobs_val.extend(
+                    batch_out.input_top_logprobs_val[batch_index]
+                )
+                state.input_top_logprobs_idx.extend(
+                    batch_out.input_top_logprobs_idx[batch_index]
+                )
+
+            # Always accumulate output top logprobs
+            state.output_top_logprobs_val.extend(
+                batch_out.output_top_logprobs_val[batch_index]
+            )
+            state.output_top_logprobs_idx.extend(
+                batch_out.output_top_logprobs_idx[batch_index]
+            )
+
     async def _handle_batch_output(self, batch_out: BatchTokenIDOut):
         """Handle batch generation output from scheduler."""
         # Process each request in the batch
@@ -526,35 +576,16 @@ class GrpcRequestManager:
                     },
                 }

-                # Accumulate input logprobs (only once, usually in first chunk)
-                if batch_out.input_token_logprobs_val and i < len(
-                    batch_out.input_token_logprobs_val
-                ):
-                    if not state.input_token_logprobs_val:
-                        state.input_token_logprobs_val.extend(
-                            batch_out.input_token_logprobs_val[i]
-                        )
-                        if batch_out.input_token_logprobs_idx and i < len(
-                            batch_out.input_token_logprobs_idx
-                        ):
-                            state.input_token_logprobs_idx.extend(
-                                batch_out.input_token_logprobs_idx[i]
-                            )
-                        if batch_out.input_top_logprobs_val and i < len(
-                            batch_out.input_top_logprobs_val
-                        ):
-                            state.input_top_logprobs_val.extend(
-                                batch_out.input_top_logprobs_val[i]
-                            )
-                        if batch_out.input_top_logprobs_idx and i < len(
-                            batch_out.input_top_logprobs_idx
-                        ):
-                            state.input_top_logprobs_idx.extend(
-                                batch_out.input_top_logprobs_idx[i]
-                            )
+                # Accumulate logprobs (following tokenizer_manager pattern)
+                if state.obj.return_logprob:
+                    self._convert_logprob_style(state, batch_out, i)

-                # Send input logprobs based on mode
-                if state.input_token_logprobs_val:
+                # Send input logprobs if available
+                if (
+                    state.obj.return_logprob
+                    and state.obj.logprob_start_len >= 0
+                    and state.input_token_logprobs_val
+                ):
                     if state.obj.stream and not state.input_logprobs_sent:
                         # Streaming: send input logprobs once in first chunk that has them
                         output_data["input_logprobs"] = {
@@ -573,33 +604,12 @@ class GrpcRequestManager:
                         "top_logprobs_idx": state.input_top_logprobs_idx,
                     }

-                # Add output logprobs if available (RAW - no detokenization!)
-                if batch_out.output_token_logprobs_val and i < len(
-                    batch_out.output_token_logprobs_val
+                # Send output logprobs if available
+                if (
+                    state.obj.return_logprob
+                    and batch_out.output_token_logprobs_val
+                    and i < len(batch_out.output_token_logprobs_val)
                 ):
-                    # Accumulate in state first
-                    state.output_token_logprobs_val.extend(
-                        batch_out.output_token_logprobs_val[i]
-                    )
-                    if batch_out.output_token_logprobs_idx and i < len(
-                        batch_out.output_token_logprobs_idx
-                    ):
-                        state.output_token_logprobs_idx.extend(
-                            batch_out.output_token_logprobs_idx[i]
-                        )
-                    if batch_out.output_top_logprobs_val and i < len(
-                        batch_out.output_top_logprobs_val
-                    ):
-                        state.output_top_logprobs_val.extend(
-                            batch_out.output_top_logprobs_val[i]
-                        )
-                    if batch_out.output_top_logprobs_idx and i < len(
-                        batch_out.output_top_logprobs_idx
-                    ):
-                        state.output_top_logprobs_idx.extend(
-                            batch_out.output_top_logprobs_idx[i]
-                        )
                     if state.obj.stream:
                         # For streaming: send incremental logprobs (only new tokens in this chunk)
                         # NOTE: this is different than TokenizerManager, which always accumulates
...
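The new `_convert_logprob_style` helper mirrors `tokenizer_manager.convert_logprob_style` and bails out when `input_token_logprobs_val` is `None`, which is exactly the state a request reaches when `logprob_start_len = -1` and the scheduler never computes input logprobs. A trimmed sketch of that contract, using hypothetical stand-ins for `GrpcReqState` and `BatchTokenIDOut` (not the real classes):

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class FakeBatchOut:  # stand-in for BatchTokenIDOut
    input_token_logprobs_val: Optional[List[List[float]]]  # None if never computed
    output_token_logprobs_val: List[List[float]]

@dataclass
class FakeState:  # stand-in for GrpcReqState
    input_token_logprobs_val: List[float] = field(default_factory=list)
    output_token_logprobs_val: List[float] = field(default_factory=list)

def accumulate(state: FakeState, batch_out: FakeBatchOut, i: int) -> None:
    # Early exit, as in _convert_logprob_style: no input logprobs at all
    if batch_out.input_token_logprobs_val is None:
        return
    # The list may also be empty (input logprobs arrive only in the first chunk)
    if batch_out.input_token_logprobs_val:
        state.input_token_logprobs_val.extend(batch_out.input_token_logprobs_val[i])
    state.output_token_logprobs_val.extend(batch_out.output_token_logprobs_val[i])

state = FakeState()
accumulate(state, FakeBatchOut(None, [[-0.1, -0.2]]), 0)  # logprob_start_len = -1 case
assert state.input_token_logprobs_val == [] and state.output_token_logprobs_val == []
```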
@@ -415,7 +415,11 @@ class SGLangSchedulerServicer(sglang_scheduler_pb2_grpc.SglangSchedulerServicer)
             mm_inputs=None,  # TODO: implement mm support
             sampling_params=sampling_params,
             return_logprob=grpc_req.return_logprob,
-            logprob_start_len=grpc_req.logprob_start_len or -1,
+            logprob_start_len=(
+                grpc_req.logprob_start_len
+                if grpc_req.logprob_start_len is not None
+                else -1
+            ),
             top_logprobs_num=grpc_req.top_logprobs_num or 0,
             stream=grpc_req.stream or False,
             lora_id=grpc_req.lora_id if grpc_req.lora_id else None,
@@ -486,10 +490,10 @@ class SGLangSchedulerServicer(sglang_scheduler_pb2_grpc.SglangSchedulerServicer)
             ignore_eos=grpc_params.ignore_eos,
         )

-    def _convert_logprobs_to_proto(
+    def _convert_output_logprobs_to_proto(
         self, logprobs_data: Dict
-    ) -> Optional[sglang_scheduler_pb2.LogProbs]:
-        """Convert logprobs dict to proto LogProbs format (transport RAW data only)."""
+    ) -> Optional[sglang_scheduler_pb2.OutputLogProbs]:
+        """Convert output logprobs dict to proto (no None values, plain floats)."""
         if not logprobs_data:
             return None
@@ -509,8 +513,47 @@ class SGLangSchedulerServicer(sglang_scheduler_pb2_grpc.SglangSchedulerServicer)
                 )
             )

-        return sglang_scheduler_pb2.LogProbs(
-            token_logprobs=token_logprobs_val,
+        return sglang_scheduler_pb2.OutputLogProbs(
+            token_logprobs=token_logprobs_val,  # Plain float array
+            token_ids=token_logprobs_idx,
+            top_logprobs=top_logprobs_proto,
+        )
+
+    def _convert_input_logprobs_to_proto(
+        self, logprobs_data: Dict
+    ) -> Optional[sglang_scheduler_pb2.InputLogProbs]:
+        """Convert input logprobs dict to proto (first token is None, wrapped in InputTokenLogProb)."""
+        if not logprobs_data:
+            return None
+
+        token_logprobs_val = logprobs_data.get("token_logprobs_val", [])
+        token_logprobs_idx = logprobs_data.get("token_logprobs_idx", [])
+        top_logprobs_val = logprobs_data.get("top_logprobs_val", [])
+        top_logprobs_idx = logprobs_data.get("top_logprobs_idx", [])
+
+        # Wrap values in InputTokenLogProb (None for first token, value for others)
+        token_logprobs_wrapped = [
+            (
+                sglang_scheduler_pb2.InputTokenLogProb()
+                if x is None
+                else sglang_scheduler_pb2.InputTokenLogProb(value=x)
+            )
+            for x in token_logprobs_val
+        ]
+
+        # Build TopLogProbs entries
+        top_logprobs_proto = []
+        if top_logprobs_val and top_logprobs_idx:
+            for val_list, idx_list in zip(top_logprobs_val, top_logprobs_idx):
+                top_logprobs_proto.append(
+                    sglang_scheduler_pb2.TopLogProbs(
+                        values=val_list,
+                        token_ids=idx_list,
+                    )
+                )
+
+        return sglang_scheduler_pb2.InputLogProbs(
+            token_logprobs=token_logprobs_wrapped,
             token_ids=token_logprobs_idx,
             top_logprobs=top_logprobs_proto,
         )
@@ -522,12 +565,12 @@ class SGLangSchedulerServicer(sglang_scheduler_pb2_grpc.SglangSchedulerServicer)
         meta_info = output.get("meta_info", {})

         # Convert output logprobs if present
-        output_logprobs_proto = self._convert_logprobs_to_proto(
+        output_logprobs_proto = self._convert_output_logprobs_to_proto(
             output.get("output_logprobs")
         )

         # Convert input logprobs if present (only in first chunk)
-        input_logprobs_proto = self._convert_logprobs_to_proto(
+        input_logprobs_proto = self._convert_input_logprobs_to_proto(
             output.get("input_logprobs")
         )
@@ -576,12 +619,12 @@ class SGLangSchedulerServicer(sglang_scheduler_pb2_grpc.SglangSchedulerServicer)
             matched_stop_kwargs["matched_stop_str"] = matched

         # Convert output logprobs if present
-        output_logprobs_proto = self._convert_logprobs_to_proto(
+        output_logprobs_proto = self._convert_output_logprobs_to_proto(
             output.get("output_logprobs")
         )

         # Convert input logprobs if present
-        input_logprobs_proto = self._convert_logprobs_to_proto(
+        input_logprobs_proto = self._convert_input_logprobs_to_proto(
             output.get("input_logprobs")
         )
...
@@ -175,13 +175,13 @@ message GenerateStreamChunk {
   int32 cached_tokens = 4;

   // Output logprobs (if requested) - incremental for streaming
-  LogProbs output_logprobs = 5;
+  OutputLogProbs output_logprobs = 5;

   // Hidden states (if requested)
   repeated float hidden_states = 6;

   // Input logprobs (if requested) - only in first chunk
-  LogProbs input_logprobs = 7;
+  InputLogProbs input_logprobs = 7;
 }

 message GenerateComplete {
@@ -197,7 +197,7 @@ message GenerateComplete {
   int32 cached_tokens = 5;

   // Output logprobs if requested (cumulative)
-  LogProbs output_logprobs = 6;
+  OutputLogProbs output_logprobs = 6;

   // All hidden states if requested
   repeated HiddenStates all_hidden_states = 7;
@@ -209,7 +209,7 @@ message GenerateComplete {
   }

   // Input logprobs if requested (for prompt tokens)
-  LogProbs input_logprobs = 10;
+  InputLogProbs input_logprobs = 10;
 }

 message GenerateError {
@@ -218,7 +218,8 @@ message GenerateError {
   string details = 3;
 }

-message LogProbs {
+// Output logprobs - all values are present (no None)
+message OutputLogProbs {
   repeated float token_logprobs = 1;
   repeated int32 token_ids = 2;
@@ -226,6 +227,20 @@ message LogProbs {
   repeated TopLogProbs top_logprobs = 3;
 }

+// Input logprobs - first token has no logprob (None)
+message InputLogProbs {
+  repeated InputTokenLogProb token_logprobs = 1;
+  repeated int32 token_ids = 2;
+  // Top logprobs at each position
+  repeated TopLogProbs top_logprobs = 3;
+}
+
+// Wrapper to represent optional logprob (first input token has no logprob)
+message InputTokenLogProb {
+  optional float value = 1;
+}
+
 message TopLogProbs {
   repeated float values = 1;
   repeated int32 token_ids = 2;
...
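The schema split exists because proto3 `repeated float` has no way to mark an element as missing, yet the first prompt token has no logprob. Wrapping each entry in `InputTokenLogProb` with an `optional float` restores per-element presence, which the generated bindings expose via `HasField`. A small usage sketch, assuming the generated module `sglang_scheduler_pb2` is importable:

```python
import sglang_scheduler_pb2 as pb2

# Three prompt tokens; the first has no logprob, so its wrapper is left empty.
msg = pb2.InputLogProbs(
    token_logprobs=[
        pb2.InputTokenLogProb(),             # first token: value unset -> None
        pb2.InputTokenLogProb(value=-0.42),
        pb2.InputTokenLogProb(value=-1.37),
    ],
    token_ids=[101, 2023, 2003],
)

# Map unset wrappers back to None using proto3 optional field presence.
logprobs = [
    lp.value if lp.HasField("value") else None for lp in msg.token_logprobs
]
print(logprobs)  # [None, -0.41999998688697815, -1.3700000047683716] (float32 rounding)
```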
@@ -29,7 +29,7 @@ from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__
 from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2

-DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16sglang_scheduler.proto...')  # old serialized descriptor (generated byte blob elided)
+DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16sglang_scheduler.proto...')  # regenerated serialized descriptor (generated byte blob elided)
 _globals = globals()
 _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
@@ -53,55 +53,59 @@ if not _descriptor._USE_C_DESCRIPTORS:
 _globals['_GENERATERESPONSE']._serialized_start=1858
 _globals['_GENERATERESPONSE']._serialized_end=2085
 _globals['_GENERATESTREAMCHUNK']._serialized_start=2088
-_globals['_GENERATESTREAMCHUNK']._serialized_end=2339
-_globals['_GENERATECOMPLETE']._serialized_start=2342
-_globals['_GENERATECOMPLETE']._serialized_end=2727
-_globals['_GENERATEERROR']._serialized_start=2729
-_globals['_GENERATEERROR']._serialized_end=2804
-_globals['_LOGPROBS']._serialized_start=2806
-_globals['_LOGPROBS']._serialized_end=2917
-_globals['_TOPLOGPROBS']._serialized_start=2919
-_globals['_TOPLOGPROBS']._serialized_end=2967
-_globals['_HIDDENSTATES']._serialized_start=2969
-_globals['_HIDDENSTATES']._serialized_end=3032
-_globals['_EMBEDREQUEST']._serialized_start=3035
-_globals['_EMBEDREQUEST']._serialized_end=3365
-_globals['_EMBEDRESPONSE']._serialized_start=3368
-_globals['_EMBEDRESPONSE']._serialized_end=3525
-_globals['_EMBEDCOMPLETE']._serialized_start=3528
-_globals['_EMBEDCOMPLETE']._serialized_end=3691
-_globals['_EMBEDDING']._serialized_start=3693
-_globals['_EMBEDDING']._serialized_end=3735
-_globals['_EMBEDERROR']._serialized_start=3737
-_globals['_EMBEDERROR']._serialized_end=3797
-_globals['_HEALTHCHECKREQUEST']._serialized_start=3799
-_globals['_HEALTHCHECKREQUEST']._serialized_end=3877
-_globals['_HEALTHCHECKRESPONSE']._serialized_start=3879
-_globals['_HEALTHCHECKRESPONSE']._serialized_end=3934
-_globals['_ABORTREQUEST']._serialized_start=3936
-_globals['_ABORTREQUEST']._serialized_end=3986
-_globals['_ABORTRESPONSE']._serialized_start=3988
-_globals['_ABORTRESPONSE']._serialized_end=4037
-_globals['_LOADLORAREQUEST']._serialized_start=4039
-_globals['_LOADLORAREQUEST']._serialized_end=4112
-_globals['_LOADLORARESPONSE']._serialized_start=4114
-_globals['_LOADLORARESPONSE']._serialized_end=4186
-_globals['_UNLOADLORAREQUEST']._serialized_start=4188
-_globals['_UNLOADLORAREQUEST']._serialized_end=4227
-_globals['_UNLOADLORARESPONSE']._serialized_start=4229
-_globals['_UNLOADLORARESPONSE']._serialized_end=4283
-_globals['_UPDATEWEIGHTSREQUEST']._serialized_start=4285
-_globals['_UPDATEWEIGHTSREQUEST']._serialized_end=4404
-_globals['_UPDATEWEIGHTSRESPONSE']._serialized_start=4406
-_globals['_UPDATEWEIGHTSRESPONSE']._serialized_end=4463
-_globals['_GETINTERNALSTATEREQUEST']._serialized_start=4465
-_globals['_GETINTERNALSTATEREQUEST']._serialized_end=4510
-_globals['_GETINTERNALSTATERESPONSE']._serialized_start=4512
-_globals['_GETINTERNALSTATERESPONSE']._serialized_end=4578
-_globals['_SETINTERNALSTATEREQUEST']._serialized_start=4580
-_globals['_SETINTERNALSTATEREQUEST']._serialized_end=4645
-_globals['_SETINTERNALSTATERESPONSE']._serialized_start=4647
-_globals['_SETINTERNALSTATERESPONSE']._serialized_end=4707
-_globals['_SGLANGSCHEDULER']._serialized_start=4710
-_globals['_SGLANGSCHEDULER']._serialized_end=5092
+_globals['_GENERATESTREAMCHUNK']._serialized_end=2350
+_globals['_GENERATECOMPLETE']._serialized_start=2353
+_globals['_GENERATECOMPLETE']._serialized_end=2749
+_globals['_GENERATEERROR']._serialized_start=2751
+_globals['_GENERATEERROR']._serialized_end=2826
+_globals['_OUTPUTLOGPROBS']._serialized_start=2828
+_globals['_OUTPUTLOGPROBS']._serialized_end=2945
+_globals['_INPUTLOGPROBS']._serialized_start=2948
+_globals['_INPUTLOGPROBS']._serialized_end=3106
+_globals['_INPUTTOKENLOGPROB']._serialized_start=3108
+_globals['_INPUTTOKENLOGPROB']._serialized_end=3157
+_globals['_TOPLOGPROBS']._serialized_start=3159
+_globals['_TOPLOGPROBS']._serialized_end=3207
+_globals['_HIDDENSTATES']._serialized_start=3209
+_globals['_HIDDENSTATES']._serialized_end=3272
+_globals['_EMBEDREQUEST']._serialized_start=3275
+_globals['_EMBEDREQUEST']._serialized_end=3605
+_globals['_EMBEDRESPONSE']._serialized_start=3608
+_globals['_EMBEDRESPONSE']._serialized_end=3765
+_globals['_EMBEDCOMPLETE']._serialized_start=3768
+_globals['_EMBEDCOMPLETE']._serialized_end=3931
+_globals['_EMBEDDING']._serialized_start=3933
+_globals['_EMBEDDING']._serialized_end=3975
+_globals['_EMBEDERROR']._serialized_start=3977
+_globals['_EMBEDERROR']._serialized_end=4037
+_globals['_HEALTHCHECKREQUEST']._serialized_start=4039
+_globals['_HEALTHCHECKREQUEST']._serialized_end=4117
+_globals['_HEALTHCHECKRESPONSE']._serialized_start=4119
+_globals['_HEALTHCHECKRESPONSE']._serialized_end=4174
+_globals['_ABORTREQUEST']._serialized_start=4176
+_globals['_ABORTREQUEST']._serialized_end=4226
+_globals['_ABORTRESPONSE']._serialized_start=4228
+_globals['_ABORTRESPONSE']._serialized_end=4277
+_globals['_LOADLORAREQUEST']._serialized_start=4279
+_globals['_LOADLORAREQUEST']._serialized_end=4352
+_globals['_LOADLORARESPONSE']._serialized_start=4354
+_globals['_LOADLORARESPONSE']._serialized_end=4426
+_globals['_UNLOADLORAREQUEST']._serialized_start=4428
+_globals['_UNLOADLORAREQUEST']._serialized_end=4467
+_globals['_UNLOADLORARESPONSE']._serialized_start=4469
+_globals['_UNLOADLORARESPONSE']._serialized_end=4523
+_globals['_UPDATEWEIGHTSREQUEST']._serialized_start=4525
+_globals['_UPDATEWEIGHTSREQUEST']._serialized_end=4644
+_globals['_UPDATEWEIGHTSRESPONSE']._serialized_start=4646
+_globals['_UPDATEWEIGHTSRESPONSE']._serialized_end=4703
+_globals['_GETINTERNALSTATEREQUEST']._serialized_start=4705
+_globals['_GETINTERNALSTATEREQUEST']._serialized_end=4750
+_globals['_GETINTERNALSTATERESPONSE']._serialized_start=4752
+_globals['_GETINTERNALSTATERESPONSE']._serialized_end=4818
+_globals['_SETINTERNALSTATEREQUEST']._serialized_start=4820
+_globals['_SETINTERNALSTATEREQUEST']._serialized_end=4885
+_globals['_SETINTERNALSTATERESPONSE']._serialized_start=4887
+_globals['_SETINTERNALSTATERESPONSE']._serialized_end=4947
+_globals['_SGLANGSCHEDULER']._serialized_start=4950
+_globals['_SGLANGSCHEDULER']._serialized_end=5332
 # @@protoc_insertion_point(module_scope)
...
@@ -174,10 +174,10 @@ class GenerateStreamChunk(_message.Message):
     prompt_tokens: int
     completion_tokens: int
     cached_tokens: int
-    output_logprobs: LogProbs
+    output_logprobs: OutputLogProbs
     hidden_states: _containers.RepeatedScalarFieldContainer[float]
-    input_logprobs: LogProbs
-    def __init__(self, token_ids: _Optional[_Iterable[int]] = ..., prompt_tokens: _Optional[int] = ..., completion_tokens: _Optional[int] = ..., cached_tokens: _Optional[int] = ..., output_logprobs: _Optional[_Union[LogProbs, _Mapping]] = ..., hidden_states: _Optional[_Iterable[float]] = ..., input_logprobs: _Optional[_Union[LogProbs, _Mapping]] = ...) -> None: ...
+    input_logprobs: InputLogProbs
+    def __init__(self, token_ids: _Optional[_Iterable[int]] = ..., prompt_tokens: _Optional[int] = ..., completion_tokens: _Optional[int] = ..., cached_tokens: _Optional[int] = ..., output_logprobs: _Optional[_Union[OutputLogProbs, _Mapping]] = ..., hidden_states: _Optional[_Iterable[float]] = ..., input_logprobs: _Optional[_Union[InputLogProbs, _Mapping]] = ...) -> None: ...

 class GenerateComplete(_message.Message):
     __slots__ = ("output_ids", "finish_reason", "prompt_tokens", "completion_tokens", "cached_tokens", "output_logprobs", "all_hidden_states", "matched_token_id", "matched_stop_str", "input_logprobs")
@@ -196,12 +196,12 @@ class GenerateComplete(_message.Message):
     prompt_tokens: int
     completion_tokens: int
     cached_tokens: int
-    output_logprobs: LogProbs
+    output_logprobs: OutputLogProbs
     all_hidden_states: _containers.RepeatedCompositeFieldContainer[HiddenStates]
     matched_token_id: int
     matched_stop_str: str
-    input_logprobs: LogProbs
-    def __init__(self, output_ids: _Optional[_Iterable[int]] = ..., finish_reason: _Optional[str] = ..., prompt_tokens: _Optional[int] = ..., completion_tokens: _Optional[int] = ..., cached_tokens: _Optional[int] = ..., output_logprobs: _Optional[_Union[LogProbs, _Mapping]] = ..., all_hidden_states: _Optional[_Iterable[_Union[HiddenStates, _Mapping]]] = ..., matched_token_id: _Optional[int] = ..., matched_stop_str: _Optional[str] = ..., input_logprobs: _Optional[_Union[LogProbs, _Mapping]] = ...) -> None: ...
+    input_logprobs: InputLogProbs
+    def __init__(self, output_ids: _Optional[_Iterable[int]] = ..., finish_reason: _Optional[str] = ..., prompt_tokens: _Optional[int] = ..., completion_tokens: _Optional[int] = ..., cached_tokens: _Optional[int] = ..., output_logprobs: _Optional[_Union[OutputLogProbs, _Mapping]] = ..., all_hidden_states: _Optional[_Iterable[_Union[HiddenStates, _Mapping]]] = ..., matched_token_id: _Optional[int] = ..., matched_stop_str: _Optional[str] = ..., input_logprobs: _Optional[_Union[InputLogProbs, _Mapping]] = ...) -> None: ...

 class GenerateError(_message.Message):
     __slots__ = ("message", "http_status_code", "details")
@@ -213,7 +213,7 @@ class GenerateError(_message.Message):
     details: str
     def __init__(self, message: _Optional[str] = ..., http_status_code: _Optional[str] = ..., details: _Optional[str] = ...) -> None: ...

-class LogProbs(_message.Message):
+class OutputLogProbs(_message.Message):
     __slots__ = ("token_logprobs", "token_ids", "top_logprobs")
     TOKEN_LOGPROBS_FIELD_NUMBER: _ClassVar[int]
     TOKEN_IDS_FIELD_NUMBER: _ClassVar[int]
@@ -223,6 +223,22 @@ class LogProbs(_message.Message):
     top_logprobs: _containers.RepeatedCompositeFieldContainer[TopLogProbs]
     def __init__(self, token_logprobs: _Optional[_Iterable[float]] = ..., token_ids: _Optional[_Iterable[int]] = ..., top_logprobs: _Optional[_Iterable[_Union[TopLogProbs, _Mapping]]] = ...) -> None: ...

+class InputLogProbs(_message.Message):
+    __slots__ = ("token_logprobs", "token_ids", "top_logprobs")
+    TOKEN_LOGPROBS_FIELD_NUMBER: _ClassVar[int]
+    TOKEN_IDS_FIELD_NUMBER: _ClassVar[int]
+    TOP_LOGPROBS_FIELD_NUMBER: _ClassVar[int]
+    token_logprobs: _containers.RepeatedCompositeFieldContainer[InputTokenLogProb]
+    token_ids: _containers.RepeatedScalarFieldContainer[int]
+    top_logprobs: _containers.RepeatedCompositeFieldContainer[TopLogProbs]
+    def __init__(self, token_logprobs: _Optional[_Iterable[_Union[InputTokenLogProb, _Mapping]]] = ..., token_ids: _Optional[_Iterable[int]] = ..., top_logprobs: _Optional[_Iterable[_Union[TopLogProbs, _Mapping]]] = ...) -> None: ...
+
+class InputTokenLogProb(_message.Message):
+    __slots__ = ("value",)
+    VALUE_FIELD_NUMBER: _ClassVar[int]
+    value: float
+    def __init__(self, value: _Optional[float] = ...) -> None: ...
+
 class TopLogProbs(_message.Message):
     __slots__ = ("values", "token_ids")
     VALUES_FIELD_NUMBER: _ClassVar[int]
...
@@ -175,13 +175,13 @@ message GenerateStreamChunk {
   int32 cached_tokens = 4;

   // Output logprobs (if requested) - incremental for streaming
-  LogProbs output_logprobs = 5;
+  OutputLogProbs output_logprobs = 5;

   // Hidden states (if requested)
   repeated float hidden_states = 6;

   // Input logprobs (if requested) - only in first chunk
-  LogProbs input_logprobs = 7;
+  InputLogProbs input_logprobs = 7;
 }

 message GenerateComplete {
@@ -197,7 +197,7 @@ message GenerateComplete {
   int32 cached_tokens = 5;

   // Output logprobs if requested (cumulative)
-  LogProbs output_logprobs = 6;
+  OutputLogProbs output_logprobs = 6;

   // All hidden states if requested
   repeated HiddenStates all_hidden_states = 7;
@@ -209,7 +209,7 @@ message GenerateComplete {
   }

   // Input logprobs if requested (for prompt tokens)
-  LogProbs input_logprobs = 10;
+  InputLogProbs input_logprobs = 10;
 }

 message GenerateError {
@@ -218,7 +218,8 @@ message GenerateError {
   string details = 3;
 }

-message LogProbs {
+// Output logprobs - all values are present (no None)
+message OutputLogProbs {
   repeated float token_logprobs = 1;
   repeated int32 token_ids = 2;
@@ -226,6 +227,20 @@ message LogProbs {
   repeated TopLogProbs top_logprobs = 3;
 }

+// Input logprobs - first token has no logprob (None)
+message InputLogProbs {
+  repeated InputTokenLogProb token_logprobs = 1;
+  repeated int32 token_ids = 2;
+  // Top logprobs at each position
+  repeated TopLogProbs top_logprobs = 3;
+}
+
+// Wrapper to represent optional logprob (first input token has no logprob)
+message InputTokenLogProb {
+  optional float value = 1;
+}
+
 message TopLogProbs {
   repeated float values = 1;
   repeated int32 token_ids = 2;
...
@@ -1239,7 +1239,7 @@ impl GrpcRouter {
     /// Note: Always decodes with skip_special_tokens=false to show actual tokens generated
     fn convert_proto_to_openai_logprobs(
         &self,
-        proto_logprobs: &proto::LogProbs,
+        proto_logprobs: &proto::OutputLogProbs,
     ) -> Result<crate::protocols::spec::ChatLogProbs, String> {
         let mut content_items = Vec::new();
...
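Client side, nothing about chunking changes: `input_logprobs` still arrives at most once (in the first streaming chunk that carries it), so consumers can branch on message presence. A hedged consumer sketch in Python, assuming `responses` is the iterator returned by the `Generate` streaming stub:

```python
def collect_logprobs(responses):
    """Drain GenerateResponse messages; input logprobs appear at most once."""
    input_logprobs = None
    output_token_logprobs = []
    for resp in responses:
        if resp.HasField("chunk"):
            chunk = resp.chunk
            # Only the first chunk carries input logprobs, if they were requested
            if input_logprobs is None and chunk.HasField("input_logprobs"):
                input_logprobs = [
                    lp.value if lp.HasField("value") else None
                    for lp in chunk.input_logprobs.token_logprobs
                ]
            # Output logprobs are incremental per chunk; extend as they stream in
            output_token_logprobs.extend(chunk.output_logprobs.token_logprobs)
        elif resp.HasField("error"):
            raise RuntimeError(resp.error.message)
        elif resp.HasField("complete"):
            break
    return input_logprobs, output_token_logprobs
```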