Commit c0d96b32 authored by chenzk's avatar chenzk
Browse files

v1.0

parents
Pipeline #2852 failed with stages
in 0 seconds
import contextlib
import io
import json
import os
import re
import sys
import traceback
import fire
from vllm import LLM, SamplingParams
# Maximum number of agent turns before the loop is force-stopped.
max_turns = 5

# System prompt for the code-interpreter agent. Typos in the original
# ("wirte", "destoried", "excuet", "You will got", ...) are fixed here;
# the format keywords (Analyse:, This Step Todo:, Finished:) and the
# <|execute_start|>/<|execute_end|> markers are unchanged.
system_prompt_template = """You are an AI Agent who is proficient in solving complicated tasks.
Each step you should write executable code to fulfill the user query. Any response without code means the task is completed and you do not have another chance to submit code.
You are equipped with a codeinterpreter. You can give the code and get the execution result of your code. You should use the codeinterpreter in the following format:
<|execute_start|>
```python
<your code>
```
<|execute_end|>
WARNING: Do not use cv2.waitKey(0) cv2.destroyAllWindows()!!! Or the program will be destroyed
Each round, your answer should ALWAYS use the following format (each of your responses should contain code, until you complete the task):
Analyse: (Analyse the message you received and plan what you should do)
This Step Todo: One subtask that needs to be done at this step
Code (WARNING: MAKE SURE YOUR CODE FOLLOWS THE FORMAT AND WRITE CODE OR THE TASK WILL BE FAILED):
<|execute_start|>
```python
<your code>
```
<|execute_end|>
You will get the result of your code after each step. When the code of the previous subtask is executed successfully, you can write and execute the code for the next subtask.
When all the code you write is executed and you get the code result that can fulfill the user query, you should summarize the previous analysis process and make a formal response to the user. The response should follow this format:
WARNING: MAKE SURE YOU GET THE CODE EXECUTION RESULT THAT FULFILLS ALL REQUIREMENTS OF THE USER BEFORE USING "Finished"
Finished: <Answer to user query>
Some notice:
1. When you want to draw a plot, use plt.savefig() and print the image path in markdown format instead of plt.show()
2. Save anything to ./output folder
3. End the process whenever you complete the task. When you do not have Action(Code), use: Finished: <summarize the analysis process and make a response>
4. Do not ask for user input in your python code.
"""
def execute_code(code):
    """Execute a Python code string and capture its stdout/stderr.

    Imitates notebook behavior: if the last line is a simple assignment
    (``name = expr``), the assigned name is echoed with ``print`` so the
    "cell" shows its value. Use nbclient for anything more complex.

    Args:
        code (str): Python source to run.

    Returns:
        dict: {'output': captured stdout, 'error': exception message or
        captured stderr (empty string on success)}.
    """
    stdout_capture = io.StringIO()
    stderr_capture = io.StringIO()
    lines = code.strip().split('\n')
    last_expr = lines[-1].strip()
    # BUG FIX: the original checked `'=' in last_expr`, which also matched
    # `==`, `+=`, `<=`, etc. and then appended an unparseable print()
    # (e.g. `print(print(x`), making the whole snippet fail. Only a plain
    # `name = ...` assignment (not followed by another `=`) triggers the echo.
    match = re.match(r"^([A-Za-z_]\w*)\s*=(?!=)", last_expr)
    if match:
        code += f"\nprint({match.group(1)})"
    with contextlib.redirect_stdout(stdout_capture), contextlib.redirect_stderr(stderr_capture):
        try:
            # Security note: exec() on model-generated code is inherently
            # unsafe; acceptable only in this local demo context.
            exec(code)
        except Exception as e:
            return {'output': stdout_capture.getvalue(), 'error': str(e)}
    return {'output': stdout_capture.getvalue(), 'error': stderr_capture.getvalue()}
class DemoLLM:
    """Thin wrapper around a vLLM engine that renders ChatML-style prompts
    and returns a single completion string per call."""

    def __init__(self, model_path):
        """Load the model at *model_path* and set default sampling parameters."""
        self.sampling_params = SamplingParams(
            n=1,
            best_of=None,
            presence_penalty=0.0,
            frequency_penalty=0.0,
            repetition_penalty=1.02,
            temperature=1.0,
            top_p=0.85,
            top_k=-1,
            use_beam_search=False,
            length_penalty=1.0,
            early_stopping=False,
            stop=None,
            stop_token_ids=None,
            ignore_eos=False,
            max_tokens=300,
            logprobs=None,
            prompt_logprobs=None,
            skip_special_tokens=True,
        )
        # Single-GPU, eager-mode engine.
        self.llm = LLM(
            model=model_path,
            tensor_parallel_size=1,
            trust_remote_code=True,
            enforce_eager=True,
        )

    def apply_template(self, messages):
        """Join chat *messages* into one ChatML prompt ending with an
        open assistant header so the model continues as the assistant."""
        rendered = ''.join(
            f"<|im_start|>{m['role']}\n{m['content']}<|im_end|>\n"
            for m in messages
        )
        return rendered + "<|im_start|>assistant\n"

    def generate(self, messages):
        """Run one generation pass; return the completion text, or None
        when the engine produced no output."""
        outputs = self.llm.generate(self.apply_template(messages),
                                    self.sampling_params)
        if not outputs:
            return None
        return outputs[0].outputs[0].text
def extract_code(text):
    """Extract the contents of all ```python ...``` fenced blocks in *text*.

    Args:
        text (str): Text possibly containing fenced Python code blocks.

    Returns:
        list[str]: Code strings with surrounding whitespace stripped;
        empty list when no block is found.
    """
    # \s* (rather than the original \s+) also accepts empty blocks and
    # fences with no whitespace padding around the code.
    pattern = r'```python\s*(.*?)\s*```'
    return re.findall(pattern, text, re.DOTALL)
def process(model_path):
    """
    Run a multi-turn code-interpreter agent loop with DemoLLM.

    Args:
        model_path (str): The path to the language model directory.
    """
    llm = DemoLLM(model_path)
    messages = [
        {"role": "system", "content": system_prompt_template},
        {"role": "user", "content": "2 的 100 次方是多少?"},
    ]
    for index in range(max_turns):
        print(f"Turn {index+1} start...")
        raw_resp = llm.generate(messages)
        print(f"Raw response: {raw_resp}")
        if raw_resp is None:
            # Engine produced nothing; stop rather than crash below.
            break
        # BUG FIX: record the assistant's own reply in the history so the
        # model sees its previous analysis/code on the next turn (the
        # original only ever appended the execution result).
        messages.append({"role": "assistant", "content": raw_resp})
        # "Finished" marks task completion per the system prompt.
        if "Finished" in raw_resp:
            break
        code_list = extract_code(raw_resp)
        if not code_list:
            # No code means the model considers the task done.
            break
        # Execute only the last code block of the turn.
        code_str = code_list[-1]
        run_result = execute_code(code_str)
        executor_response = run_result['output'] if run_result['error'] == "" else run_result['error']
        print(f"Code execution result: {run_result}")
        # Feed the execution result back as the next user message.
        messages.append({"role": "user", "content": executor_response})
# CLI entry point: exposes `process` via python-fire,
# e.g. `python this_script.py --model_path /path/to/model`.
if __name__ == "__main__":
    fire.Fire(process)
\ No newline at end of file
# MiniCPM FunctionCall
1. Start VLLM functioncall server
```shell
python -m vllm.entrypoints.openai.api_server \
--model openbmb/MiniCPM3-4B \
--dtype auto \
--api-key token-abc123 \
--tensor-parallel-size 1 \
--trust-remote-code \
--enable-auto-tool-choice \
--tool-call-parser minicpm \
--tool-parser-plugin minicpm_tool_parser.py
```
2. Functioncall client example
```python
from openai import OpenAI
client = OpenAI(base_url="http://localhost:8000/v1", api_key="token-abc123")
tools = [
{
"type": "function",
"function": {
"name": "get_current_weather",
"description": "Get the current weather in a given location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state, e.g. San Francisco, CA",
},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
},
"required": ["location"],
},
}
}
]
messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]
completion = client.chat.completions.create(
model="openbmb/MiniCPM3-4B",
messages=messages,
tools=tools,
tool_choice="auto"
)
print(completion)
```
3. Run functioncall inference locally
```shell
python functioncall.py
```
# Thanks
- `resolve_ast_call` and `resolve_ast_by_type` are adapted from [gorilla](https://github.com/ShishirPatil/gorilla)
- MiniCPM chat template with tool support contributed by @CISCai
\ No newline at end of file
#!/usr/bin/env python
# encoding: utf-8
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams
from minicpm_tool_parser import fc2dict
import json
# Model to load; a Hugging Face repo id or a local path.
model_path = "openbmb/MiniCPM3-4B"

# OpenAI-style tool schema advertised to the model via the chat template.
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_delivery_date",
            "description": "Get the delivery date for a customer's order. Call this whenever you need to know the delivery date, for example when a customer asks 'Where is my package'",
            "parameters": {
                "type": "object",
                "properties": {
                    "order_id": {
                        "type": "string",
                        "description": "The customer's order ID.",
                    },
                },
                "required": ["order_id"],
                "additionalProperties": False,
            },
        },
    }
]
# Seed conversation. The commented-out messages below illustrate what a
# full round-trip (assistant tool calls + tool results + final answer)
# looks like in this message format.
messages = [
    {
        "role": "system",
        "content": "You are a helpful customer support assistant. Use the supplied tools to assist the user.",
    },
    {
        "role": "user",
        "content": "Hi, can you tell me the delivery date for my order? The order id is 1234 and 4321.",
    },
    # {
    #     "content": "",
    #     "tool_calls": [
    #         {
    #             "type": "function",
    #             "function": {
    #                 "name": "get_delivery_date",
    #                 "arguments": {"order_id": "1234"},
    #             },
    #             "id": "call_b4ab0b4ec4b5442e86f017fe0385e22e",
    #         },
    #         {
    #             "type": "function",
    #             "function": {
    #                 "name": "get_delivery_date",
    #                 "arguments": {"order_id": "4321"},
    #             },
    #             "id": "call_628965479dd84794bbb72ab9bdda0c39",
    #         },
    #     ],
    #     "role": "assistant",
    # },
    # {
    #     "role": "tool",
    #     "content": '{"delivery_date": "2024-09-05", "order_id": "1234"}',
    #     "tool_call_id": "call_b4ab0b4ec4b5442e86f017fe0385e22e",
    # },
    # {
    #     "role": "tool",
    #     "content": '{"delivery_date": "2024-09-05", "order_id": "4321"}',
    #     "tool_call_id": "call_628965479dd84794bbb72ab9bdda0c39",
    # },
    # {
    #     "content": "Both your orders will be delivered on 2024-09-05.",
    #     "role": "assistant",
    #     "thought": "\nI have the information you need, both orders will be delivered on the same date, 2024-09-05.\n",
    # },
]

tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
# NOTE(review): this first render is unused — the prompt is recomputed at
# the top of the loop below; this line looks like dead code.
prompt = tokenizer.apply_chat_template(
    messages, tools=tools, tokenize=False, add_generation_prompt=True
)
llm = LLM(model_path, trust_remote_code=True)
sampling_params = SamplingParams(temperature=0.8, top_p=0.95, max_tokens=1000)
def fake_tool_execute(toolcall):
    """Pretend to execute a tool call and return a canned JSON result.

    Reads ``toolcall["function"]["arguments"]["order_id"]`` when present,
    defaulting each missing level so any dict is accepted.
    """
    arguments = toolcall.get("function", {}).get("arguments", {})
    payload = {
        "delivery_date": "2024-09-05",
        "order_id": arguments.get("order_id", "order_id"),
    }
    return json.dumps(payload)
# Agent loop: render the chat template, generate, then either answer the
# model's tool calls with canned results (and iterate) or stop when the
# model replies without tool calls.
while True:
    prompt = tokenizer.apply_chat_template(
        messages, tools=tools, tokenize=False, add_generation_prompt=True
    )
    outputs = llm.generate([prompt], sampling_params)
    response = outputs[0].outputs[0].text
    # Parse the raw completion into an OpenAI-style message dict.
    msg = fc2dict(response)
    if (
        "tool_calls" in msg
        and msg["tool_calls"] is not None
        and len(msg["tool_calls"]) > 0
    ):
        messages.append(msg)
        print(msg)
        # Feed a fake result back for every tool call so the model can
        # produce its final answer on the next iteration.
        for toolcall in msg["tool_calls"]:
            tool_response = fake_tool_execute(toolcall)
            # NOTE(review): fc2dict's tool_call entries only contain
            # "name" and "arguments" — no "id" key — so this lookup
            # looks like it raises KeyError; confirm against fc2dict.
            tool_msg = {
                "role": "tool",
                "content": tool_response,
                "tool_call_id": toolcall["id"],
            }
            messages.append(tool_msg)
            print(tool_msg)
    else:
        # Plain answer without tool calls: record it and stop.
        messages.append(msg)
        print(msg)
        break
{#- Render a Python type annotation string for one JSON-schema fragment:
    enums -> a class named after the parameter, arrays -> List[...],
    objects -> Dict[...] (or a generated model class when it has
    properties), and a list of types -> Union[...]. -#}
{%- macro json_to_python_type(param_name, json_spec) %}
  {%- set basic_type_map = {
      'string': 'str',
      'number': 'float',
      'integer': 'int',
      'boolean': 'bool',
      'null': 'None'
  } %}
  {%- if json_spec.enum %}
    {{- param_name|title }}
  {%- elif basic_type_map[json_spec.type] is defined %}
    {{- basic_type_map[json_spec.type] }}
  {%- elif json_spec.type == 'array' %}
    {{- 'List[' + json_to_python_type(param_name, json_spec['items']) + ']' }}
  {%- elif json_spec.type == 'object' %}
    {{- 'Dict[str, ' + json_to_python_type(param_name, json_spec.additionalProperties if json_spec.additionalProperties else 'Any') + ']' if not json_spec.properties else param_name|title }}
  {%- elif json_spec.type is iterable %}
    {#- A JSON-schema "type" given as a list maps to a Union. -#}
    {{- 'Union[' }}
    {%- for t in json_spec.type %}
      {{- json_to_python_type(param_name, {'type': t}) }}
      {{- ', ' if not loop.last }}
    {%- endfor %}
    {{- ']' }}
  {%- else %}
    {{- 'Any' }}
  {%- endif %}
{%- endmacro %}
{#- Emit pydantic-style field lines for every property of an object
    schema, generating nested Enum/BaseModel classes first; the caller's
    body (class/def header) is captured via caller() and emitted with
    the fields appended. -#}
{%- macro object_to_fields(json_spec, field_indent) %}
  {%- set o_ns = namespace(f = caller()) %}
  {%- for param_name, param_fields in json_spec.properties|items %}
    {%- if param_fields.enum %}
      {{- '\n\nclass ' + param_name|title + '(Enum):\n' }}
      {%- for enum_option in param_fields.enum %}
        {{- '    enum_' + loop.index0|string + ' = ' + enum_option|tojson + '\n' }}
      {%- endfor %}
    {%- elif param_fields.type == 'object' and param_fields.properties %}
      {%- call object_to_fields(param_fields, '    ') %}
        {{- '\n\nclass ' + param_name|title + '(BaseModel):\n' }}
      {%- endcall %}
    {%- elif param_fields.type == 'array' and param_fields['items'] and param_fields['items'].type == 'object' and param_fields['items'].properties %}
      {%- call object_to_fields(param_fields['items'], '    ') %}
        {{- '\n\nclass ' + param_name|title + '(BaseModel):\n' }}
      {%- endcall %}
    {%- endif %}
    {#- Required fields get no default; optional ones are Optional[...] with
        a default, or a Field(...) when metadata must be attached. -#}
    {%- set param_default = param_fields.default|tojson if param_fields.default is string else param_fields.default|string if param_fields.default is defined else 'None' %}
    {%- set o_ns.f = o_ns.f + field_indent + param_name + ': ' %}
    {%- set o_ns.f = o_ns.f + ('Optional[' + json_to_python_type(param_name, param_fields) + ']' if param_name not in json_spec.required else json_to_python_type(param_name, param_fields)) %}
    {%- if not param_fields.title and not param_fields.description and not param_fields.pattern %}
      {%- set o_ns.f = o_ns.f + (' = ' + param_default if param_name not in json_spec.required else '') %}
    {%- else %}
      {%- set o_ns.f = o_ns.f + (' = Field(...' if param_name in json_spec.required else ' = Field(' + param_default) %}
      {%- set o_ns.f = o_ns.f + (', description=' + param_fields.description|tojson if param_fields.description else '') %}
      {%- set o_ns.f = o_ns.f + (', regex=' + param_fields.pattern|tojson if param_fields.pattern else '') %}
      {%- set o_ns.f = o_ns.f + (', title=' + param_fields.title|tojson if param_fields.title else '') %}
      {%- set o_ns.f = o_ns.f + ')' %}
    {%- endif %}
    {%- set o_ns.f = o_ns.f + '\n' %}
  {%- endfor %}
  {{- o_ns.f }}
{%- endmacro %}
{#- Render every tool as a documented Python function signature (with any
    supporting pydantic models) so the model sees an importable-looking
    API in its system prompt. -#}
{%- macro tool_parser(tools) %}
  {%- for tool in tools %}
    {%- if tool.type is not defined or tool.type == 'function' %}
      {%- if tool.function is defined %}
        {%- set tool = tool.function %}
      {%- endif %}
      {%- set tool_params = tool.parameters if tool.parameters is defined else none %}
      {%- call object_to_fields(tool_params, '        ') %}
        {{- '\n\ndef ' + tool.name + '(' }}
        {%- if tool_params %}
          {#- Optional parameters carry their default in the signature. -#}
          {%- for param_name, param_fields in tool_params.properties|items %}
            {%- set param_default = param_fields.default|tojson if param_fields.default is string else param_fields.default|string if param_fields.default is defined else 'None' %}
            {{- ', ' if loop.index0 != 0 }}
            {{- param_name }}
            {{- '=' + param_default if param_name not in tool_params.required }}
          {%- endfor %}
        {%- endif %}
        {{- '):\n    """' }}
        {{- tool.description }}
        {{- '\n\n    Args:\n' if tool_params else '\n' }}
      {%- endcall %}
      {{- '    """\n' }}
    {%- endif %}
  {%- endfor %}
{%- endmacro %}
{#- Main template: split off the system message, render the tool list
    and call-format rules into the system block, then replay the
    conversation in ChatML form with MiniCPM thought/tool-call markers. -#}
{%- if messages[0]['role'] == 'system' %}
  {%- set loop_messages = messages[1:] %}
  {%- set system_message = messages[0]['content'] %}
{%- else %}
  {%- set loop_messages = messages %}
  {%- set system_message = '' %}
{%- endif %}
{{- '<|im_start|>system\n' + system_message if system_message or tools }}
{%- if tools %}
  {#- Advertise the tools as Python code plus the calling rules. -#}
  {{- '\n# Functions\nHere is a list of functions that you can invoke:\n```python\nfrom enum import Enum\nfrom typing import List, Dict, Optional\nfrom pydantic import BaseModel, Field\n\n' }}
  {{- tool_parser(tools) }}
  {{- "\n```\n\n# Function Call Rule and Output Format\n- If the user's question can be answered without calling any function, please answer the user's question directly. In this situation, you should return your thought and answer the user's question directly.\n- If the user cannot be answered without calling any function, and the user does not provide enough information to call functions, please ask the user for more information. In this situation, you should return your thought and ask the user for more information.\n- If the user's question cannot be answered without calling any function, and the user has provided enough information to call functions to solve it, you should call the functions. In this situation, the assistant should return your thought and call the functions.\n- Use default parameters unless the user has specified otherwise.\n- You should answer in the following format:\n\n<|thought_start|>\n{explain why the user's question can be answered without calling a function or why you should ask the user for more information or why you should call one or more functions and your plan to solve the user's question.}\n<|thought_end|>\n<|tool_call_start|>\n```python\nfunc1(params_name=params_value, params_name2=params_value2...)\nfunc2(params)\n```\n<|tool_call_end|>\n{answer the user's question directly or ask the user for more information}" }}
{%- endif %}
{{- '<|im_end|>\n' if system_message or tools }}
{%- for message in loop_messages %}
  {%- set content = message.content %}
  {%- if message.role == 'assistant' and message.tool_calls %}
    {#- Assistant turns with tool calls are re-serialized as Python
        call syntax between the tool-call markers. -#}
    {{- '<|im_start|>' + message.role + '\n' }}
    {{- '<|thought_start|>\n' + message.thought + '\n<|thought_end|>\n' if message.thought }}
    {{- '<|tool_call_start|>\n```python\n' }}
    {%- for tool_call in message.tool_calls %}
      {%- if tool_call.function is defined %}
        {%- set tool_call = tool_call.function %}
      {%- endif %}
      {{- tool_call.name + '(' }}
      {%- if tool_call.arguments is defined and tool_call.arguments|length > 0 %}
        {%- for param_name, param_value in tool_call.arguments|items %}
          {{- param_name + '=' + param_value|tojson }}
          {{- ',' if not loop.last }}
        {%- endfor %}
      {%- endif %}
      {{- ')\n' }}
    {%- endfor %}
    {{- '```\n<|tool_call_end|>\n' }}
    {{- content if content and not content.startswith('<|tool_call_start|>') }}
    {{- '<|im_end|>\n' }}
  {%- elif message.role == 'assistant' and message.thought %}
    {{- '<|im_start|>' + message.role + '\n' + '<|thought_start|>\n' + message.thought + '\n<|thought_end|>\n' + content + '<|im_end|>\n' }}
  {%- else %}
    {{- '<|im_start|>' + message.role + '\n' + content + '<|im_end|>\n' }}
  {%- endif %}
{%- endfor %}
{%- if add_generation_prompt %}
  {{- '<|im_start|>assistant\n' }}
{%- endif %}
#!/usr/bin/env python
# encoding: utf-8
import ast
import json
import keyword
import re
import traceback
from typing import Dict, List, Sequence, Union
from transformers import PreTrainedTokenizerBase
from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
DeltaFunctionCall, DeltaMessage,
DeltaToolCall,
ExtractedToolCallInformation,
FunctionCall, ToolCall)
from vllm.entrypoints.openai.tool_parsers.abstract_tool_parser import (
ToolParser, ToolParserManager)
from vllm.logger import init_logger
logger = init_logger(__name__)


@ToolParserManager.register_module("minicpm")
class MiniCPMToolParser(ToolParser):
    """
    Tool call parser for MiniCPM3 4B models intended for use with the
    examples/tool_chat_template_minicpm3.jinja template.

    Used when --enable-auto-tool-choice --tool-call-parser minicpm are all set
    """

    def __init__(self, tokenizer: PreTrainedTokenizerBase):
        super().__init__(tokenizer)
        # Marker tokens emitted by the MiniCPM chat template.
        self.thought_start_token = "<|thought_start|>"
        self.thought_end_token = "<|thought_end|>"
        self.tool_call_start_token = "<|tool_call_start|>"
        self.tool_call_end_token = "<|tool_call_end|>"
        # Token ids that end generation for MiniCPM3.
        self.stop_token_ids = [2, 73440]

    def extract_tool_calls(
            self, model_output: str,
            request: ChatCompletionRequest) -> ExtractedToolCallInformation:
        """
        Extract the tool calls from a complete (non-streaming) model
        response. Returns tools_called=False with plain content when no
        tool call could be parsed.
        """
        msg = fc2dict(model_output)
        if ("tool_calls" in msg and msg["tool_calls"] is not None
                and len(msg["tool_calls"]) > 0):
            tool_calls: List[ToolCall] = [
                ToolCall(
                    type="function",
                    function=FunctionCall(
                        name=raw_function_call["name"],
                        # function call args are JSON but as a string
                        arguments=json.dumps(raw_function_call["arguments"],
                                             ensure_ascii=False),
                    ),
                ) for raw_function_call in msg["tool_calls"]
            ]
            # Content (if any) that accompanied the tool call.
            return ExtractedToolCallInformation(
                tools_called=True,
                tool_calls=tool_calls,
                content=msg.get("content", None),
            )
        else:
            # BUG FIX: no tool call was parsed, so report
            # tools_called=False (the original returned True here,
            # mislabeling plain-content responses as tool calls).
            return ExtractedToolCallInformation(
                tools_called=False,
                tool_calls=[],
                content=msg.get("content", None),
            )

    def extract_tool_calls_streaming(
        self,
        previous_text: str,
        current_text: str,
        delta_text: str,
        previous_token_ids: Sequence[int],
        current_token_ids: Sequence[int],
        delta_token_ids: Sequence[int],
        request: ChatCompletionRequest,
    ) -> Union[DeltaMessage, None]:
        """Incrementally surface tool calls while the model streams."""
        # If no tools are provided, we don't need to parse tool calls.
        if not request.tools:
            return DeltaMessage(content=delta_text)
        # Nothing to emit until the model has closed its thought section.
        if self.thought_end_token not in current_text:
            return None
        useful_text = current_text.split(self.thought_end_token)[1]
        if (current_token_ids[-1]
                in self.stop_token_ids):  # case 3: stream generation ended
            msg = fc2dict(current_text)
            if ("tool_calls" in msg and msg["tool_calls"] is not None
                    and len(msg["tool_calls"]) > 0):
                self.prev_tool_call_arr = msg["tool_calls"]
                self.streamed_args_for_tool = ["" for tc in msg["tool_calls"]]
                delta_message = DeltaMessage(
                    role="assistant",
                    content=msg.get("content", None),
                )
                return delta_message
            else:
                return DeltaMessage(content=msg.get("content", None))
        elif (self.tool_call_start_token in useful_text
              and self.tool_call_end_token
              in useful_text):  # case 2: tool call ended
            return None
        elif (self.tool_call_start_token
              in useful_text):  # case 1: tool call started
            # Extract function name and arguments, handling one level of
            # nested parentheses inside the argument list.
            pattern = r"(\w+)\(((?:[^()]*|\([^()]*\))*)\)"
            matches = re.finditer(pattern, useful_text)
            tool_calls: List[Dict] = []
            delta = None
            for idx, match in enumerate(matches):
                if self.current_tool_id < idx:
                    self.current_tool_id = idx
                func_name = match.group(1)
                func_args = match.group(2)
                tool_call_string = f"{func_name}({func_args})\n"
                parsed = ast.parse(tool_call_string)
                for elem in parsed.body:
                    assert isinstance(elem.value, ast.Call)  # type: ignore
                    calls = resolve_ast_call(elem.value)  # type: ignore
                    for func_name, func_args in calls.items():
                        this_call = {
                            "name":
                            func_name,
                            "arguments":
                            json.dumps(func_args, ensure_ascii=False),
                        }
                        delta = DeltaMessage(tool_calls=[
                            DeltaToolCall(
                                index=self.current_tool_id,
                                function=DeltaFunctionCall(
                                    **this_call).model_dump(exclude_none=True),
                            )
                        ])
            # NOTE(review): `tool_calls` is never appended to above, so
            # the bookkeeping below always records empty state — confirm
            # whether `this_call` was meant to be collected here.
            self.prev_tool_call_arr = tool_calls
            self.streamed_args_for_tool = ["" for x in tool_calls]
            self.current_tool_name_sent = True
            return delta
        else:
            return None
def fc2dict(
    sequence: str,
    tool_call_start="<|tool_call_start|>",
    tool_call_end="<|tool_call_end|>",
    thought_start="<|thought_start|>",
    thought_end="<|thought_end|>",
):
    """Parse a raw MiniCPM completion into an OpenAI-style message dict.

    First peels off the optional thought section, then attempts to parse
    the optional tool-call section (Python call syntax, possibly wrapped
    in a markdown fence). On success the result carries "tool_calls";
    otherwise it carries "thought" alongside the plain content.
    """
    # Separate the thought section when both markers are present.
    if thought_end in sequence and thought_start in sequence:
        thought_string, sequence = sequence.rsplit(thought_end, 1)
        thought_string = thought_string.split(thought_start, 1)[1]
    else:
        thought_string = ""
    if tool_call_start not in sequence or tool_call_end not in sequence:
        # No tool-call section at all: plain assistant content.
        return {
            "content": sequence.strip(),
            "role": "assistant",
            "thought": thought_string,
        }
    tool_call_string, content = sequence.rsplit(tool_call_end, 1)
    tool_call_string = tool_call_string.split(tool_call_start, 1)[1]
    try:
        tool_calls = []
        tool_call_string = tool_call_string.strip()
        # Strip the markdown ``` / ```python fence around the calls.
        if tool_call_string.startswith("```"):
            tool_call_string = tool_call_string[3:].strip()
            if tool_call_string.startswith("python"):
                tool_call_string = tool_call_string.lstrip("python").strip()
        if tool_call_string.endswith("```"):
            tool_call_string = tool_call_string[:-3].strip()
        # Mangle Python keywords used as argument names (e.g. ``from=``)
        # so the snippet still parses; de-mangled again below.
        for kw in keyword.kwlist:
            for prefix in (",", " ", "("):
                tool_call_string = tool_call_string.replace(
                    prefix + kw + "=", prefix + kw + "_=")
        module: ast.Module = ast.parse(tool_call_string)
        for stmt in module.body:
            assert isinstance(stmt.value, ast.Call)  # type: ignore
            for func_name, func_args in resolve_ast_call(
                    stmt.value).items():  # type: ignore
                restored_args = {}
                for arg_name, arg_value in func_args.items():
                    # Undo the keyword mangling applied above.
                    if arg_name.endswith("_") and arg_name[:-1] in keyword.kwlist:
                        arg_name = arg_name[:-1]
                    restored_args[arg_name] = arg_value
                tool_calls.append({"name": func_name,
                                   "arguments": restored_args})
        return {
            "content": content.strip(),
            "tool_calls": tool_calls,
            "role": "assistant",
        }
    except Exception as e:
        logger.error("Error parsing tool call: %s", str(e))
        logger.error(traceback.format_exc())
        return {
            "content": content.strip(),
            "role": "assistant",
            "thought": thought_string,
        }
# from ShishirPatil/gorilla
def resolve_ast_call(elem):
    """Convert an ast.Call node into ``{dotted_name: {kwarg: value}}``.

    Attribute chains (e.g. ``a.b.c(...)``) are flattened into a dotted
    function name; keyword arguments are resolved recursively.
    """
    # Walk the attribute chain from outermost attribute to the root name.
    parts = []
    node = elem.func
    while isinstance(node, ast.Attribute):
        parts.append(node.attr)
        node = node.value
    if isinstance(node, ast.Name):
        parts.append(node.id)
    dotted_name = ".".join(reversed(parts))
    kwargs = {kw.arg: resolve_ast_by_type(kw.value) for kw in elem.keywords}
    return {dotted_name: kwargs}
def resolve_ast_by_type(value):
    """Recursively convert an AST expression node into a plain Python value.

    Constants, lists, dicts, tuples and simple unary/binary constant
    expressions become their values; names, keyword-free calls, lambdas
    and subscripts become their source text.

    Raises:
        Exception: for AST node types that are not supported.
    """
    if isinstance(value, ast.Constant):
        output = "..." if value.value is Ellipsis else value.value
    elif isinstance(value, ast.UnaryOp):
        # e.g. ``-3``; assumes the operand is a plain constant.
        output = -value.operand.value  # type: ignore
    elif isinstance(value, ast.List):
        output = [resolve_ast_by_type(v) for v in value.elts]
    elif isinstance(value, ast.Dict):
        output = {
            resolve_ast_by_type(k): resolve_ast_by_type(v)
            for k, v in zip(value.keys, value.values)
        }
    elif isinstance(
            value,
            ast.NameConstant):  # booleans; dead on Python >= 3.8 (Constant above)
        output = value.value
    elif isinstance(
            value, ast.BinOp
    ):  # constant arithmetic passed as an argument
        output = ast.literal_eval(ast.unparse(value))  # type: ignore
    elif isinstance(value, ast.Name):
        output = value.id
    elif isinstance(value, ast.Call):
        if len(value.keywords) == 0:
            output = ast.unparse(value)  # type: ignore
        else:
            output = resolve_ast_call(value)
    elif isinstance(value, ast.Tuple):
        output = tuple(resolve_ast_by_type(v) for v in value.elts)
    elif isinstance(value, ast.Lambda):
        # BUG FIX: ast.Lambda has no ``body[0]``; the original accessed
        # ``value.body[0].value`` and raised AttributeError. Return the
        # lambda's source text instead.
        output = ast.unparse(value)
    elif isinstance(value, ast.Ellipsis):  # dead on Python >= 3.8 (Constant above)
        output = "..."
    elif isinstance(value, ast.Subscript):
        # BUG FIX: the original tried ``value.body[0].value`` (always an
        # AttributeError), logged a spurious error, and only then fell
        # back to concatenating value/slice — unparse gives the same
        # ``base[slice]`` text directly.
        output = ast.unparse(value)  # type: ignore
    else:
        raise Exception(f"Unsupported AST type: {type(value)}")
    return output
This diff is collapsed.
This diff is collapsed.
{
"available_tools": [
{
"function": {
"description": "",
"name": "searchPOI",
"parameters": {
"$schema": "http://json-schema.org/draft-07/schema#",
"additionalProperties": false,
"properties": {
"city": {
"description": "搜索的城市名称",
"type": "string"
},
"extensions": {
"description": "返回结果控制,base返回基本信息,all返回详细信息",
"enum": [
"base",
"all"
],
"type": "string"
},
"keywords": {
"description": "要搜索的关键词",
"type": "string"
},
"offset": {
"description": "每页记录数,默认为20",
"type": "number"
},
"page": {
"description": "页码,默认为1",
"type": "number"
},
"types": {
"description": "POI类型",
"type": "string"
}
},
"required": [
"keywords"
],
"type": "object"
}
},
"type": "function"
},
{
"function": {
"description": "",
"name": "getWeather",
"parameters": {
"$schema": "http://json-schema.org/draft-07/schema#",
"additionalProperties": false,
"properties": {
"city": {
"description": "城市编码",
"type": "string"
},
"extensions": {
"description": "气象类型:base(实况天气)、all(预报天气)",
"enum": [
"base",
"all"
],
"type": "string"
}
},
"required": [
"city"
],
"type": "object"
}
},
"type": "function"
}
]
}
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
accelerate==1.2.1
datasets==3.2.0
deepspeed==0.16.5
einops==0.8.1
flash_attn==2.7.4.post1
flashinfer-python==0.2.5
huggingface-hub==0.30.2
peft==0.15.1
rouge-chinese==1.0.3
safetensors==0.5.3
sentencepiece==0.2.0
tokenizers==0.21.0
torch==2.6.0
transformers==4.49.0
trl==0.9.6
vllm==0.8.4
wandb==0.19.9
xformers==0.0.29.post2
\ No newline at end of file
This diff is collapsed.
This diff is collapsed.
# Logs
logs
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
pnpm-debug.log*
lerna-debug.log*
node_modules
dist
dist-ssr
*.local
# Editor directories and files
.vscode/*
!.vscode/extensions.json
.idea
.DS_Store
*.suo
*.ntvs*
*.njsproj
*.sln
*.sw?
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8" />
<link rel="icon" type="image/png" href="/openbmb.svg" />
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<title>MiniCPM4-Survey</title>
</head>
<body>
<div id="root"></div>
<script type="module" src="/src/main.jsx"></script>
</body>
</html>
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment