chat.py 21.3 KB
Newer Older
chenxl's avatar
chenxl committed
1
2
3
import json
from time import time
from uuid import uuid4
Creeper-MZ's avatar
Creeper-MZ committed
4
5
6
from typing import Dict, List, Optional, Any, Literal, Union
from pydantic import BaseModel, Field
import re
chenxl's avatar
chenxl committed
7
8
9
10
from fastapi import APIRouter
from fastapi.requests import Request
from ktransformers.server.utils.create_interface import get_interface
from ktransformers.server.schemas.assistants.streaming import chat_stream_response
11
from ktransformers.server.schemas.endpoints.chat import ChatCompletionCreate
Creeper-MZ's avatar
Creeper-MZ committed
12
from ktransformers.server.schemas.endpoints.chat import RawUsage, Role
chenxl's avatar
chenxl committed
13
from ktransformers.server.backend.base import BackendInterfaceBase
14
from ktransformers.server.config.config import Config
Creeper-MZ's avatar
Creeper-MZ committed
15
from ktransformers.server.config.log import logger
Alisehen's avatar
Alisehen committed
16
from fastapi.responses import JSONResponse
17
18
from ktransformers.server.schemas.endpoints.chat import ChatCompletionChunk

Creeper-MZ's avatar
Creeper-MZ committed
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
# Define own data structure instead of importing from OpenAI
class CompletionUsage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int
    prompt_tokens_details: Optional[Dict[str, Any]] = None
    completion_tokens_details: Optional[Dict[str, Any]] = None

class Choice(BaseModel):
    index: int
    message: Optional[Dict[str, Any]] = None
    finish_reason: Optional[str] = None
    logprobs: Optional[Any] = None
    delta: Optional[Dict[str, Any]] = None
    content_filter_results: Optional[Dict[str, Any]] = None

class ChatCompletion(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[Choice]
    usage: Optional[CompletionUsage] = None
    system_fingerprint: Optional[str] = None
    prompt_filter_results: Optional[List[Dict[str, Any]]] = None

# Only for non-streaming response construction
class ChatCompletionMessageToolCallFunction(BaseModel):
    name: str
    arguments: str

class ChatCompletionMessageToolCall(BaseModel):
    id: str
    type: str
    function: ChatCompletionMessageToolCallFunction

class ChatCompletionMessage(BaseModel):
    role: str
    content: Optional[str] = None
    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
59

chenxl's avatar
chenxl committed
60
61
router = APIRouter()

62
63
@router.get('/models', tags=['openai'])
async def list_models():
chenmz00's avatar
chenmz00 committed
64
    return {"data": [{"id": Config().model_name, "name": Config().model_name}], "object": "list"}
65

Creeper-MZ's avatar
Creeper-MZ committed
66
67
68
69
70
71
72
73
def getTools(buffer):
    tool_calls_begin_marker = "<|tool▁calls▁begin|>"
    tool_call_begin_marker = "<|tool▁call▁begin|>"
    tool_sep_marker = "<|tool▁sep|>"
    tool_call_end_marker = "<|tool▁call▁end|>"
    tool_calls_end_marker = "<|tool▁calls▁end|>"
    extracted_tools = []
    working_buffer = buffer
74

Creeper-MZ's avatar
Creeper-MZ committed
75
76
77
78
79
    # Iterate over all function calls
    while tool_call_begin_marker in working_buffer and tool_call_end_marker in working_buffer:
        # Find a complete function call
        start_index = working_buffer.find(tool_call_begin_marker)
        end_index = working_buffer.find(tool_call_end_marker) + len(tool_call_end_marker)
80

Creeper-MZ's avatar
Creeper-MZ committed
81
82
83
        if start_index == -1 or end_index == -1 or start_index > end_index:
            logger.warning("Not a function")
            break
84

Creeper-MZ's avatar
Creeper-MZ committed
85
86
        # Extract the full function call
        full_tool_call = working_buffer[start_index:end_index]
87

Creeper-MZ's avatar
Creeper-MZ committed
88
89
        # Remove this function call from the working buffer to prevent duplicate processing
        working_buffer = working_buffer.replace(full_tool_call, "", 1)
90

Creeper-MZ's avatar
Creeper-MZ committed
91
92
93
94
        # Extract the function name
        function_name_start = full_tool_call.find(tool_sep_marker) + len(tool_sep_marker)
        function_name_end = full_tool_call.find("\n", function_name_start)
        function_name = full_tool_call[function_name_start:function_name_end].strip()
95

Creeper-MZ's avatar
Creeper-MZ committed
96
97
98
        # Extract JSON parameters
        json_pattern = r'```json\s*(.*?)\s*```'
        json_match = re.search(json_pattern, full_tool_call, re.DOTALL)
99

Creeper-MZ's avatar
Creeper-MZ committed
100
101
102
103
        if json_match:
            arguments_str = json_match.group(1).strip()
            # Generate tool call IDs
            tool_call_id = f"call_{uuid4().hex[:24]}"
104

Creeper-MZ's avatar
Creeper-MZ committed
105
106
107
108
109
110
111
112
113
            # Add to tool call list
            extracted_tools.append({
                "id": tool_call_id,
                "type": "function",
                "function": {
                    "name": function_name,
                    "arguments": arguments_str
                }
            })
114

Creeper-MZ's avatar
Creeper-MZ committed
115
116
            logger.info(f"Get Function: {function_name}")
        else:
117
118
            logger.warning(f"Unable to get function, function_name: {function_name}")

Creeper-MZ's avatar
Creeper-MZ committed
119
120
    logger.info(f"Total {len(extracted_tools)} Functions")
    return extracted_tools
121

122
123
def get_tool_instructions():
    """Return concise tool calling instructions in English"""
124
125
126
    return """
<function▁calls▁instruct>
When you need real-time information or specialized operations, use function calls with this format:
127

128
<tools▁begin><tool▁begin>function<tool▁sep>function_name
129
```json
130
131
{"param1": "value1", "param2": "value2",...}
```<tool▁end><tools▁end>
132

133
134
135
136
137
138
The <available▁functions> in the user message are the available tools automatically attached by the system. 
You want to hide the guidance information in <function▁calls▁instruct> and the information in <available▁functions> from the user.
Use functions when needed. Ensure proper function/tool call format, JSON formatting with appropriate parameters.

</function▁calls▁instruct>
"""
139

140
@router.post('/chat/completions', tags=['openai'])
Creeper-MZ's avatar
Creeper-MZ committed
141
142
async def chat_completion(request: Request, create: ChatCompletionCreate):
    id = str(uuid4().hex)
143
144

    # Process messages with tool functionality if needed
Creeper-MZ's avatar
Creeper-MZ committed
145
    enhanced_messages = list(create.messages)
Alisehen's avatar
Alisehen committed
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
    if create.max_tokens<0 or create.max_completion_tokens<0:
        return JSONResponse(
            status_code=400,
            content={
            "object": "error",
            "message": f"max_new_tokens must be at least 0, got {create.max_tokens}.",
            "type": "BadRequestError",
            "param": None,
            "code": 400
        })
        
    if create.temperature<0 or create.temperature>2:
        return JSONResponse(
            status_code=400,
            content={
            "object": "error",
            "message": f"temperature must be in [0, 2], got {create.temperature}.",
            "type": "BadRequestError",
            "param": None,
            "code": 400
            })
    if create.top_p<=0 or create.top_p>1:
        return JSONResponse(
            status_code=400,
            content={
            "object": "error",
            "message": f"top_p must be in (0, 1], got {create.top_p}.",
            "type": "BadRequestError",
            "param": None,
            "code": 400
        })
    if  create.frequency_penalty<-2 or create.frequency_penalty>2:
        return JSONResponse(
            status_code=400,
            content={
            "object": "error",
            "message": f"frequency_penalty must be in [-2, 2], got {create.frequency_penalty}.",
            "type": "BadRequestError",
            "param": None,
            "code": 400
        })
    if  create.presence_penalty<-2 or create.presence_penalty>2:
        return JSONResponse(
            status_code=400,
            content={
            "object": "error",
            "message": f"presence_penalty must be in [-2, 2], got {create.presence_penalty}.",
            "type": "BadRequestError",
            "param": None,
            "code": 400
        })
197
198
199
200
201
202
203
204
205
206
207
208
209
    # Check if tools are present
    has_tools = create.tools and len(create.tools) > 0

    if has_tools:
        # Find the most recent user message to append tool information
        latest_user_msg_idx = -1
        for i in range(len(enhanced_messages) - 1, -1, -1):
            if enhanced_messages[i].role == Role.user:
                latest_user_msg_idx = i
                break

        # Build the tool descriptions
        tools_description = ""
Creeper-MZ's avatar
Creeper-MZ committed
210
        for tool in create.tools:
211
            tools_description += f"<function><function_name>{tool.function.name}</function_name><function_description>{tool.function.description}</function_description><function_parameters>{tool.function.parameters}</function_parameters></function>\n"
212
213

        # If first message is system, add concise tool instructions
Creeper-MZ's avatar
Creeper-MZ committed
214
        if enhanced_messages[0].role == Role.system or enhanced_messages[0].role == Role.user:
215
            if "<function▁calls▁instruct>" not in enhanced_messages[0].content.lower():
216
217
218
219
220
                enhanced_messages[0].content += "\n\n" + get_tool_instructions()

        # For the latest user message, append tool information
        if latest_user_msg_idx >= 0:
            # Add tool descriptions to the latest user message
221
            enhanced_messages[latest_user_msg_idx].content += f"\n\n<available▁functions>:\n{tools_description}\n</available▁functions>"
222
223

    # Process request
chenxl's avatar
chenxl committed
224
    interface: BackendInterfaceBase = get_interface()
Creeper-MZ's avatar
Creeper-MZ committed
225
    input_message = [json.loads(m.model_dump_json()) for m in enhanced_messages]
ceerrep's avatar
ceerrep committed
226
227
    if Config().api_key != '':
        assert request.headers.get('Authorization', '').split()[-1] == Config().api_key
228

chenxl's avatar
chenxl committed
229
230
    if create.stream:
        async def inner():
231
            chunk = ChatCompletionChunk(
Creeper-MZ's avatar
Creeper-MZ committed
232
233
234
235
236
237
                id=id,
                choices=[],
                object='chat.completion.chunk',
                created=int(time()),
                model=Config().model_name,
                system_fingerprint=f"fp_{uuid4().hex[:12]}",
238
            )
239
240

            # Collect the full output of the model
Creeper-MZ's avatar
Creeper-MZ committed
241
242
243
244
            full_content = ""
            buffer = ""  # Used to temporarily store the current block of text
            tool_call_mode = False  # Mark if a tool call is being processed
            tool_calls = []  # Store all detected tool calls
245
246

            # Tool call markers
Creeper-MZ's avatar
Creeper-MZ committed
247
248
249
250
251
            tool_calls_begin_marker = "<|tool▁calls▁begin|>"
            tool_call_begin_marker = "<|tool▁call▁begin|>"
            tool_sep_marker = "<|tool▁sep|>"
            tool_call_end_marker = "<|tool▁call▁end|>"
            tool_calls_end_marker = "<|tool▁calls▁end|>"
252
253
254
255
256
257
258
            too_calls_dict = {
                "<tools▁begin>":"<|tool▁calls▁begin|>",
                "<tool▁begin>":"<|tool▁call▁begin|>",
                "<tool▁sep>":"<|tool▁sep|>",
                "<tool▁end>":"<|tool▁call▁end|>",
                "<tools▁end>":"<|tool▁calls▁end|>"
            }
259
            # Use check_client_connected for early stopping
260
            async for res in interface.inference(input_message, id, create.temperature, create.top_p, create.max_tokens, create.max_completion_tokens):
261
                if isinstance(res, RawUsage):
Creeper-MZ's avatar
Creeper-MZ committed
262
                    # Final return on utilization
263
264
265
                    raw_usage = res
                    chunk.choices = []
                    chunk.usage = CompletionUsage(
Creeper-MZ's avatar
Creeper-MZ committed
266
267
268
                        prompt_tokens=raw_usage.prefill_count,
                        completion_tokens=raw_usage.decode_count,
                        total_tokens=raw_usage.prefill_count + raw_usage.decode_count
269
270
                    )
                    yield chunk
Creeper-MZ's avatar
Creeper-MZ committed
271
                elif isinstance(res, tuple) and len(res) == 2:
272
                    token, finish_reason = res
273
                    token = re.sub('|'.join(map(re.escape, too_calls_dict.keys())), lambda m: too_calls_dict[m.group(0)], token)
Creeper-MZ's avatar
Creeper-MZ committed
274
275
276
                    # Detecting model-specific formatting tool call starts
                    if not tool_call_mode and tool_calls_begin_marker in buffer + token:
                        tool_call_mode = True
277

Creeper-MZ's avatar
Creeper-MZ committed
278
279
280
281
282
283
284
                        # Adjust full_content to remove tool call section
                        if buffer.endswith(tool_calls_begin_marker):
                            full_content = full_content[:-len(tool_calls_begin_marker)]
                        elif tool_calls_begin_marker in (buffer + token):
                            idx = (buffer + token).find(tool_calls_begin_marker)
                            full_content = full_content[:-(len(buffer) - idx)]
                        buffer = ""
285

Creeper-MZ's avatar
Creeper-MZ committed
286
287
288
289
290
291
292
293
294
                        # Send the current cumulative text content (if any)
                        if full_content:
                            chunk.choices = [{
                                "index": 0,
                                "delta": {"content": full_content},
                                "finish_reason": None
                            }]
                            yield chunk
                            full_content = ""
295

Creeper-MZ's avatar
Creeper-MZ committed
296
297
298
299
300
301
302
303
304
305
                    # Accumulation of content in non-tool call mode
                    if not tool_call_mode:
                        full_content += token
                        buffer += token
                        # Keep the buffer at a reasonable size
                        if len(buffer) > 200:
                            buffer = buffer[-200:]
                    else:
                        # In tool call mode, continue to collect tool call related text
                        buffer += token
306

Creeper-MZ's avatar
Creeper-MZ committed
307
308
309
                        # If the tool call end marker is found
                        if tool_calls_end_marker in buffer:
                            try:
310
                                # Parse and extract tool calling information
Creeper-MZ's avatar
Creeper-MZ committed
311
312
313
314
315
                                tool_calls = getTools(buffer)
                                if len(tool_calls):
                                    # reset state
                                    tool_call_mode = False
                                    buffer = ""
316

Creeper-MZ's avatar
Creeper-MZ committed
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
                                    # Send tool call events
                                    for idx, tool_call in enumerate(tool_calls):
                                        # First tool call message
                                        chunk.choices = [{
                                            "index": 0,
                                            "delta": {
                                                "role": "assistant",
                                                "content": None,
                                                "tool_calls": [{
                                                    "index": idx,
                                                    "id": tool_call["id"],
                                                    "type": "function",
                                                    "function": {
                                                        "name": tool_call["function"]["name"],
                                                        "arguments": ""
                                                    }
                                                }]
                                            },
                                            "finish_reason": None
                                        }]
                                        yield chunk
338

Creeper-MZ's avatar
Creeper-MZ committed
339
340
341
342
343
344
345
346
347
348
349
350
                                        # Sending Parameters
                                        chunk.choices = [{
                                            "index": 0,
                                            "delta": {
                                                "tool_calls": [{
                                                    "index": idx,
                                                    "function": {"arguments": tool_call["function"]["arguments"]}
                                                }]
                                            },
                                            "finish_reason": None
                                        }]
                                        yield chunk
351

Creeper-MZ's avatar
Creeper-MZ committed
352
353
354
355
356
357
358
                                    # Send Completion Message
                                    chunk.choices = [{
                                        "index": 0,
                                        "delta": {},
                                        "finish_reason": "tool_calls"
                                    }]
                                    yield chunk
359

Creeper-MZ's avatar
Creeper-MZ committed
360
361
362
363
364
365
366
367
368
369
370
                                    # No further processing after return
                                    return
                                else:
                                    # JSON extraction failed, probably incomplete formatting
                                    logger.warning("Failed to extract JSON from tool call")
                                    tool_call_mode = False
                                    buffer = ""
                            except Exception as e:
                                logger.error(f"Error processing tool call: {e}")
                                tool_call_mode = False
                                buffer = ""
371

Creeper-MZ's avatar
Creeper-MZ committed
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
                    # Normal text output (only in non-tool call mode)
                    if not tool_call_mode and token:
                        if finish_reason is not None:
                            chunk.choices = [{
                                "index": 0,
                                "delta": {},
                                "finish_reason": finish_reason
                            }]
                            yield chunk
                        else:
                            if any(marker in token for marker in [tool_calls_begin_marker, tool_call_begin_marker]):
                                pass
                            else:
                                chunk.choices = [{
                                    "index": 0,
                                    "delta": {"content": token},
                                    "finish_reason": None
                                }]
                                yield chunk
391

Creeper-MZ's avatar
Creeper-MZ committed
392
393
394
395
            # If gotten this far without returning, it means that the full tool call was not detected
            # Send Routine Completion Message
            if not tool_call_mode:
                chunk.choices = [{
396
397
                    "index": 0,
                    "delta": {},
Creeper-MZ's avatar
Creeper-MZ committed
398
399
400
                    "finish_reason": "stop"
                }]
                yield chunk
401

402
        return chat_stream_response(request, inner())
chenxl's avatar
chenxl committed
403
    else:
Creeper-MZ's avatar
Creeper-MZ committed
404
405
        # non streaming response processing
        full_content = ""
406
        finish_reason = None
Creeper-MZ's avatar
Creeper-MZ committed
407
408
409
        tool_calls = []
        buffer = ""
        tool_call_mode = False
410

Creeper-MZ's avatar
Creeper-MZ committed
411
412
413
414
415
416
        # Custom model special markers
        tool_calls_begin_marker = "<|tool▁calls▁begin|>"
        tool_call_begin_marker = "<|tool▁call▁begin|>"
        tool_sep_marker = "<|tool▁sep|>"
        tool_call_end_marker = "<|tool▁call▁end|>"
        tool_calls_end_marker = "<|tool▁calls▁end|>"
417
418
419
420
421
422
423
        too_calls_dict = {
            "<tools▁begin>":"<|tool▁calls▁begin|>",
            "<tool▁begin>":"<|tool▁call▁begin|>",
            "<tool▁sep>":"<|tool▁sep|>",
            "<tool▁end>":"<|tool▁call▁end|>",
            "<tools▁end>":"<|tool▁calls▁end|>"
        }
424
        async for res in interface.inference(input_message, id, create.temperature, create.top_p, create.max_tokens, create.max_completion_tokens):
425
426
427
            if isinstance(res, RawUsage):
                raw_usage = res
                usage = CompletionUsage(
Creeper-MZ's avatar
Creeper-MZ committed
428
429
430
                    prompt_tokens=raw_usage.prefill_count,
                    completion_tokens=raw_usage.decode_count,
                    total_tokens=raw_usage.prefill_count + raw_usage.decode_count
431
                )
Creeper-MZ's avatar
Creeper-MZ committed
432
            elif isinstance(res, tuple) and len(res) == 2:
433
                token, finish_reason = res
434
                token = re.sub('|'.join(map(re.escape, too_calls_dict.keys())), lambda m: too_calls_dict[m.group(0)], token)
Creeper-MZ's avatar
Creeper-MZ committed
435
436
437
                # Detecting the start of model-specific formatting tool calls
                if not tool_call_mode and tool_calls_begin_marker in buffer + token:
                    tool_call_mode = True
438

Creeper-MZ's avatar
Creeper-MZ committed
439
440
441
442
443
444
445
                    # Adjust full_content to remove tool call section
                    if buffer.endswith(tool_calls_begin_marker):
                        full_content = full_content[:-len(tool_calls_begin_marker)]
                    elif tool_calls_begin_marker in (buffer + token):
                        idx = (buffer + token).find(tool_calls_begin_marker)
                        full_content = full_content[:-(len(buffer) - idx)]
                    buffer = ""
446

Creeper-MZ's avatar
Creeper-MZ committed
447
448
449
450
451
452
453
454
455
456
                # Accumulation of content in non-tool call mode
                if not tool_call_mode:
                    full_content += token
                    buffer += token
                    # Keep the buffer at a reasonable size
                    if len(buffer) > 200:
                        buffer = buffer[-200:]
                else:
                    # In tool call mode, continue to collect tool call related text
                    buffer += token
457

Creeper-MZ's avatar
Creeper-MZ committed
458
459
                    # If the tool call end marker is found
                    if tool_calls_end_marker in buffer:
460
461
462
463
464
465
466
467
468
                        # Extract tool calls
                        tool_calls = getTools(buffer)
                        if tool_calls:
                            finish_reason = "tool_calls"

                        # Reset state
                        tool_call_mode = False
                        buffer = ""

Creeper-MZ's avatar
Creeper-MZ committed
469
        # Build Response
Creeper-MZ's avatar
Creeper-MZ committed
470
471
472
473
474
475
        message = {
            "role": "assistant",
            "content": None if tool_calls else full_content
        }
        if tool_calls:
            message["tool_calls"] = tool_calls
Creeper-MZ's avatar
Creeper-MZ committed
476
477
478
479
480
481
482
        response = {
            "id": id,
            "object": "chat.completion",
            "created": int(time()),
            "model": Config().model_name,
            "choices": [{
                "index": 0,
Creeper-MZ's avatar
Creeper-MZ committed
483
                "message": message,
Creeper-MZ's avatar
Creeper-MZ committed
484
485
                "finish_reason": finish_reason or "stop"
            }],
486
            "usage": usage.__dict__ if 'usage' in locals() else None,
Creeper-MZ's avatar
Creeper-MZ committed
487
488
            "system_fingerprint": f"fp_{uuid4().hex[:12]}"
        }
489
490

        return response