Commit 396700dd authored by chenzk's avatar chenzk
Browse files

v1.0

parents
Pipeline #2603 failed with stages
in 0 seconds
import asyncio
import logging
import os
import sys
from typing_extensions import Annotated, Doc
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
from dbgpt.agent.resource import ToolPack, tool
# Send log records to stdout so the agent conversation trace is visible
# when this example runs in a terminal.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
)
@tool
def simple_calculator(first_number: int, second_number: int, operator: str) -> float:
    """Simple calculator tool. Just support +, -, *, /."""
    # NOTE: the docstring above doubles as the tool description shown to the
    # LLM, so it is kept verbatim.
    import operator as _op

    # LLMs frequently pass numeric arguments as strings; coerce defensively.
    if isinstance(first_number, str):
        first_number = int(first_number)
    if isinstance(second_number, str):
        second_number = int(second_number)
    # Dispatch table instead of an if/elif ladder; easier to extend and
    # keeps a single raise site for unsupported operators.
    operations = {
        "+": _op.add,
        "-": _op.sub,
        "*": _op.mul,
        "/": _op.truediv,  # "/" may raise ZeroDivisionError, as before
    }
    if operator not in operations:
        raise ValueError(f"Invalid operator: {operator}")
    return operations[operator](first_number, second_number)
@tool
def count_directory_files(path: Annotated[str, Doc("The directory path")]) -> int:
    """Count the number of files in a directory."""
    # Guard clause: only existing directories are accepted.
    if os.path.isdir(path):
        return len(os.listdir(path))
    raise ValueError(f"Invalid directory path: {path}")
async def main():
    """Run the tool-assistant demo: a calculator task, then a file count."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    memory = AgentMemory()
    memory.gpts_memory.init(conv_id="test456")
    ctx: AgentContext = AgentContext(conv_id="test456", gpts_app_name="工具助手")
    # Expose both local tools to the assistant.
    tool_pack = ToolPack([simple_calculator, count_directory_files])
    user = await UserProxyAgent().bind(memory).bind(ctx).build()
    assistant = (
        await ToolAssistantAgent()
        .bind(ctx)
        .bind(LLMConfig(llm_client=client))
        .bind(memory)
        .bind(tool_pack)
        .build()
    )
    # Run the two demo tasks one after another in the same conversation.
    for task in (
        "Calculate the product of 10 and 99",
        "Count the number of files in /tmp",
    ):
        await user.initiate_chat(
            recipient=assistant,
            reviewer=user,
            message=task,
        )
    # dbgpt-vis message infos
    print(await memory.gpts_memory.app_link_chat_message("test456"))
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
"""Agents: single agents about CodeAssistantAgent?
Examples:
Execute the following command in the terminal:
Set env params.
.. code-block:: shell
export SILICONFLOW_API_KEY=sk-xx
export SILICONFLOW_API_BASE=https://xx:80/v1
run example.
..code-block:: shell
python examples/agents/plugin_agent_dialogue_example.py
"""
import asyncio
import os
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
from dbgpt.agent.resource import AutoGPTPluginToolPack, MCPToolPack
from dbgpt.configs.model_config import ROOT_PATH
from dbgpt.model.proxy import SiliconFlowLLMClient
# Directory containing the example Auto-GPT style plugin files.
test_plugin_dir = os.path.join(ROOT_PATH, "examples/test_files/plugins")
async def main():
    """Chat with an agent whose tools are served by an MCP (SSE) server."""
    ### Test method
    # 1. Start an MCP server as an SSE server.
    #    Reference https://github.com/supercorp-ai/supergateway
    #    npx -y supergateway --stdio "uvx mcp-server-fetch"
    #    or
    #    npx -y supergateway --stdio "npx -y @modelcontextprotocol/server-filesystem ./"
    #    ("./" can be replaced with the directory you want to expose)
    # 2. Bind the dbgpt MCPToolPack resource to the MCP SSE server like this:
    #    MCPToolPack("http://127.0.0.1:8000/sse")
    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="test456")
    context: AgentContext = AgentContext(
        conv_id="test456", gpts_app_name="MCP工具对话助手"
    )
    # Every tool exposed by the MCP server becomes callable by the agent.
    tools = MCPToolPack("http://127.0.0.1:8000/sse")
    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
    tool_engineer = (
        await ToolAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(agent_memory)
        .bind(tools)
        .build()
    )
    await user_proxy.initiate_chat(
        recipient=tool_engineer,
        reviewer=user_proxy,
        message="看下这个页面: https://www.cnblogs.com/fnng/p/18744210",  ## use together with mcp-server-fetch
        # message="有多少个文件",  ## use together with the server-filesystem MCP server
    )
    # dbgpt-vis message infos
    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
"""Agents: single agents about CodeAssistantAgent?
Examples:
Execute the following command in the terminal:
Set env params.
.. code-block:: shell
export SILICONFLOW_API_KEY=sk-xx
export SILICONFLOW_API_BASE=https://xx:80/v1
run example.
..code-block:: shell
python examples/agents/plugin_agent_dialogue_example.py
"""
import asyncio
import os
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.tool_assistant_agent import ToolAssistantAgent
from dbgpt.agent.resource import AutoGPTPluginToolPack
from dbgpt.configs.model_config import ROOT_PATH
# Directory containing the example Auto-GPT style plugin files.
test_plugin_dir = os.path.join(ROOT_PATH, "examples/test_files/plugins")
async def main():
    """Run an assistant that answers by calling Auto-GPT plugin tools."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    memory = AgentMemory()
    memory.gpts_memory.init(conv_id="test456")
    ctx: AgentContext = AgentContext(
        conv_id="test456", gpts_app_name="插件对话助手"
    )
    # Load every plugin found under the example plugin directory.
    plugin_tools = AutoGPTPluginToolPack(test_plugin_dir)
    user = await UserProxyAgent().bind(memory).bind(ctx).build()
    assistant = (
        await ToolAssistantAgent()
        .bind(ctx)
        .bind(LLMConfig(llm_client=client))
        .bind(memory)
        .bind(plugin_tools)
        .build()
    )
    await user.initiate_chat(
        recipient=assistant,
        reviewer=user,
        message="查询今天成都的天气",
    )
    # dbgpt-vis message infos
    print(await memory.gpts_memory.app_link_chat_message("test456"))
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
import asyncio
import logging
import os
import sys
from typing_extensions import Annotated, Doc
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.actions.react_action import ReActAction, Terminate
from dbgpt.agent.expand.react_agent import ReActAgent
from dbgpt.agent.resource import ToolPack, tool
# Send log records to stdout so the ReAct reasoning trace is visible
# when this example runs in a terminal.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO,
    format="%(asctime)s | %(levelname)s | %(name)s | %(message)s",
)
@tool
def simple_calculator(first_number: int, second_number: int, operator: str) -> float:
    """Simple calculator tool. Just support +, -, *, /.
    When users need to do numerical calculations, you must use this tool to calculate, \
    and you are not allowed to directly infer calculation results from user input or \
    external observations.
    """
    # NOTE: the docstring above doubles as the tool description sent to the
    # LLM, so its wording is kept verbatim.
    import operator as _op

    # LLMs frequently pass numeric arguments as strings; coerce defensively.
    if isinstance(first_number, str):
        first_number = int(first_number)
    if isinstance(second_number, str):
        second_number = int(second_number)
    # Dispatch table instead of an if/elif ladder; easier to extend and
    # keeps a single raise site for unsupported operators.
    operations = {
        "+": _op.add,
        "-": _op.sub,
        "*": _op.mul,
        "/": _op.truediv,  # "/" may raise ZeroDivisionError, as before
    }
    if operator not in operations:
        raise ValueError(f"Invalid operator: {operator}")
    return operations[operator](first_number, second_number)
@tool
def count_directory_files(path: Annotated[str, Doc("The directory path")]) -> int:
    """Count the number of files in a directory."""
    if not os.path.isdir(path):
        raise ValueError(f"Invalid directory path: {path}")
    entries = os.listdir(path)
    return len(entries)
async def main():
    """Run a ReAct agent that chains tool calls to solve a two-step task."""
    from dbgpt.model import AutoLLMClient

    # AutoLLMClient resolves provider/model from env vars; SiliconFlow is
    # the default backend here.
    llm_client = AutoLLMClient(
        # provider=os.getenv("LLM_PROVIDER", "proxy/deepseek"),
        # name=os.getenv("LLM_MODEL_NAME", "deepseek-chat"),
        provider=os.getenv("LLM_PROVIDER", "proxy/siliconflow"),
        name=os.getenv("LLM_MODEL_NAME", "Qwen/Qwen2.5-Coder-32B-Instruct"),
    )
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="test456")
    # It is important to set the temperature to a low value to get a better result
    context: AgentContext = AgentContext(
        conv_id="test456", gpts_app_name="ReAct", temperature=0.01
    )
    # Terminate() lets the ReAct loop signal that the task is finished.
    tools = ToolPack([simple_calculator, count_directory_files, Terminate()])
    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
    tool_engineer = (
        await ReActAgent(max_retry_count=10)
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(agent_memory)
        .bind(tools)
        .build()
    )
    await user_proxy.initiate_chat(
        recipient=tool_engineer,
        reviewer=user_proxy,
        message="Calculate the product of 10 and 99, then count the number of files in /tmp",
        # message="Calculate the product of 10 and 99",
        # message="Count the number of files in /tmp",
    )
    # dbgpt-vis message infos
    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
"""Agents: single agents about CodeAssistantAgent?
Examples:
Execute the following command in the terminal:
Set env params.
.. code-block:: shell
export SILICONFLOW_API_KEY=sk-xx
export SILICONFLOW_API_BASE=https://xx:80/v1
run example.
..code-block:: shell
python examples/agents/retrieve_summary_agent_dialogue_example.py
"""
import asyncio
import os
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent
from dbgpt.configs.model_config import ROOT_PATH
async def main():
    """Ask a SummaryAssistantAgent to summarize a set of files and URLs."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    context: AgentContext = AgentContext(
        conv_id="retrieve_summarize", gpts_app_name="Summary Assistant"
    )
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="retrieve_summarize")
    summarizer = (
        await SummaryAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(agent_memory)
        .build()
    )
    # Construct the proxy with bind()/build() like the other examples,
    # instead of passing memory/context to the constructor and skipping
    # build() initialization.
    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
    paths_urls = [
        os.path.join(ROOT_PATH, "examples/agents/example_files/Nuclear_power.pdf"),
        os.path.join(ROOT_PATH, "examples/agents/example_files/Taylor_Swift.pdf"),
        "https://en.wikipedia.org/wiki/Modern_Family",
        "https://en.wikipedia.org/wiki/Chernobyl_disaster",
    ]
    # TODO add a tool to load the pdf and internet files
    await user_proxy.initiate_chat(
        recipient=summarizer,
        reviewer=user_proxy,
        message=f"I want to summarize advantages of Nuclear Power. You can refer the "
        f"following file paths and URLs: {paths_urls}",
    )
    # dbgpt-vis message infos
    print(await agent_memory.gpts_memory.app_link_chat_message("retrieve_summarize"))
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
"""Run your code assistant agent in a sandbox environment.
This example demonstrates how to create a code assistant agent that can execute code
in a sandbox environment. The agent can execute Python and JavaScript code blocks
and provide the output to the user. The agent can also check the correctness of the
code execution results and provide feedback to the user.
You can limit the memory and file system resources available to the code execution
environment. The code execution environment is isolated from the host system,
preventing access to the internet and other external resources.
"""
import asyncio
import logging
import os
from typing import Optional, Tuple
from dbgpt.agent import (
Action,
ActionOutput,
AgentContext,
AgentMemory,
AgentMemoryFragment,
AgentMessage,
AgentResource,
ConversableAgent,
HybridMemory,
LLMConfig,
ProfileConfig,
UserProxyAgent,
)
from dbgpt.agent.expand.code_assistant_agent import CHECK_RESULT_SYSTEM_MESSAGE
from dbgpt.core import ModelMessageRoleType
from dbgpt.util.code_utils import UNKNOWN, extract_code, infer_lang
from dbgpt.util.string_utils import str_to_bool
from dbgpt.util.utils import colored
from dbgpt.vis.tags.vis_code import Vis, VisCode
# Module-level logger for sandbox code-execution diagnostics.
logger = logging.getLogger(__name__)
class SandboxCodeAction(Action[None]):
    """Code Action Module.

    Extracts fenced code blocks from an AI message, executes them in an
    isolated sandbox (via ``lyric``), and renders the execution result
    through the ``VisCode`` protocol.
    """

    def __init__(self, **kwargs):
        """Code action init."""
        super().__init__(**kwargs)
        # Vis protocol used to render code + logs in the chat UI.
        self._render_protocol = VisCode()
        self._code_execution_config = {}

    @property
    def render_protocol(self) -> Optional[Vis]:
        """Return the render protocol."""
        return self._render_protocol

    async def run(
        self,
        ai_message: str,
        resource: Optional[AgentResource] = None,
        rely_action_out: Optional[ActionOutput] = None,
        need_vis_render: bool = True,
        **kwargs,
    ) -> ActionOutput:
        """Perform the action.

        Parses ``ai_message`` for code blocks, runs them in the sandbox,
        and returns an ActionOutput whose success flag mirrors exit code 0.
        Any exception is caught and reported as a failed ActionOutput.
        """
        try:
            code_blocks = extract_code(ai_message)
            if len(code_blocks) < 1:
                logger.info(
                    f"No executable code found in answer,{ai_message}",
                )
                return ActionOutput(
                    is_exe_success=False, content="No executable code found in answer."
                )
            # NOTE(review): this rejects only multi-block answers whose first
            # block has an unknown language; a single UNKNOWN block falls
            # through to infer_lang() below — confirm this is intended.
            elif len(code_blocks) > 1 and code_blocks[0][0] == UNKNOWN:
                # found code blocks, execute code and push "last_n_messages" back
                logger.info(
                    f"Missing available code block type, unable to execute code,"
                    f"{ai_message}",
                )
                return ActionOutput(
                    is_exe_success=False,
                    content="Missing available code block type, "
                    "unable to execute code.",
                )
            exitcode, logs = await self.execute_code_blocks(code_blocks)
            exit_success = exitcode == 0
            content = (
                logs
                if exit_success
                else f"exitcode: {exitcode} (execution failed)\n {logs}"
            )
            # Payload consumed by the VisCode renderer.
            param = {
                "exit_success": exit_success,
                "language": code_blocks[0][0],
                "code": code_blocks,
                "log": logs,
            }
            if not self.render_protocol:
                raise NotImplementedError("The render_protocol should be implemented.")
            view = await self.render_protocol.display(content=param)
            return ActionOutput(
                is_exe_success=exit_success,
                content=content,
                view=view,
                thoughts=ai_message,
                observations=content,
            )
        except Exception as e:
            logger.exception("Code Action Run Failed!")
            return ActionOutput(
                is_exe_success=False, content="Code execution exception," + str(e)
            )

    async def execute_code_blocks(self, code_blocks):
        """Execute the code blocks and return the result.

        Returns a ``(exitcode, logs)`` tuple. Blocks run in order; execution
        stops at the first block that exits non-zero.
        """
        from lyric import (
            PyTaskFsConfig,
            PyTaskMemoryConfig,
            PyTaskResourceConfig,
        )

        from dbgpt.util.code.server import get_code_server

        fs = PyTaskFsConfig(
            preopens=[
                # Mount the /tmp directory to the /tmp directory in the sandbox
                # Directory permissions are set to 3 (read and write)
                # File permissions are set to 3 (read and write)
                ("/tmp", "/tmp", 3, 3),
                # Mount the current directory to the /home directory in the sandbox
                # Directory and file permissions are set to 1 (read)
                (".", "/home", 1, 1),
            ]
        )
        memory = PyTaskMemoryConfig(memory_limit=50 * 1024 * 1024)  # 50MB in bytes
        resources = PyTaskResourceConfig(
            fs=fs,
            memory=memory,
            env_vars=[
                ("TEST_ENV", "hello, im an env var"),
                ("TEST_ENV2", "hello, im another env var"),
            ],
        )
        code_server = await get_code_server()
        logs_all = ""
        exitcode = -1  # value returned if code_blocks is empty
        for i, code_block in enumerate(code_blocks):
            lang, code = code_block
            if not lang:
                lang = infer_lang(code)
            print(
                colored(
                    f"\n>>>>>>>> EXECUTING CODE BLOCK {i} "
                    f"(inferred language is {lang})...",
                    "red",
                ),
                flush=True,
            )
            if lang in ["python", "Python"]:
                result = await code_server.exec(code, "python", resources=resources)
                exitcode = result.exit_code
                logs = result.logs
            elif lang in ["javascript", "JavaScript"]:
                result = await code_server.exec(code, "javascript", resources=resources)
                exitcode = result.exit_code
                logs = result.logs
            else:
                # In case the language is not supported, we return an error message.
                exitcode, logs = (
                    1,
                    f"unknown language {lang}",
                )
            logs_all += "\n" + logs
            if exitcode != 0:
                return exitcode, logs_all
        return exitcode, logs_all
class SandboxCodeAssistantAgent(ConversableAgent):
    """Code Assistant Agent.

    A ConversableAgent whose single action is SandboxCodeAction: it writes
    python/javascript code, runs it in the sandbox, and self-checks the
    result against the task goal via an LLM judgment.
    """

    # NOTE: the profile strings below are sent to the LLM as the agent's
    # prompt; they are kept verbatim.
    profile: ProfileConfig = ProfileConfig(
        name="Turing",
        role="CodeEngineer",
        goal=(
            "Solve tasks using your coding and language skills.\n"
            "In the following cases, suggest python code (in a python coding block) or "
            "javascript for the user to execute.\n"
            " 1. When you need to collect info, use the code to output the info you "
            "need, for example, get the current date/time, check the "
            "operating system. After sufficient info is printed and the task is ready "
            "to be solved based on your language skill, you can solve the task by "
            "yourself.\n"
            " 2. When you need to perform some task with code, use the code to "
            "perform the task and output the result. Finish the task smartly."
        ),
        constraints=[
            "The user cannot provide any other feedback or perform any other "
            "action beyond executing the code you suggest. The user can't modify "
            "your code. So do not suggest incomplete code which requires users to "
            "modify. Don't use a code block if it's not intended to be executed "
            "by the user.Don't ask users to copy and paste results. Instead, "
            "the 'Print' function must be used for output when relevant.",
            "When using code, you must indicate the script type in the code block. "
            "Please don't include multiple code blocks in one response.",
            "If you receive user input that indicates an error in the code "
            "execution, fix the error and output the complete code again. It is "
            "recommended to use the complete code rather than partial code or "
            "code changes. If the error cannot be fixed, or the task is not "
            "resolved even after the code executes successfully, analyze the "
            "problem, revisit your assumptions, gather additional information you "
            "need from historical conversation records, and consider trying a "
            "different approach.",
            "Unless necessary, give priority to solving problems with python code.",
            "The output content of the 'print' function will be passed to other "
            "LLM agents as dependent data. Please control the length of the "
            "output content of the 'print' function. The 'print' function only "
            "outputs part of the key data information that is relied on, "
            "and is as concise as possible.",
            "Your code will by run in a sandbox environment(supporting python and "
            "javascript), which means you can't access the internet or use any "
            "libraries that are not in standard library.",
            "It is prohibited to fabricate non-existent data to achieve goals.",
        ],
        desc=(
            "Can independently write and execute python/shell code to solve various"
            " problems"
        ),
    )

    def __init__(self, **kwargs):
        """Create a new CodeAssistantAgent instance."""
        super().__init__(**kwargs)
        # The agent's only action: sandboxed code execution.
        self._init_actions([SandboxCodeAction])

    async def correctness_check(
        self, message: AgentMessage
    ) -> Tuple[bool, Optional[str]]:
        """Verify whether the current execution results meet the target expectations.

        Asks the LLM (with CHECK_RESULT_SYSTEM_MESSAGE as prompt) to judge the
        action report against the task goal. Returns ``(success, fail_reason)``
        where ``fail_reason`` is None on success.
        """
        task_goal = message.current_goal
        action_report = message.action_report
        if not action_report:
            return False, "No execution solution results were checked"
        check_result, model = await self.thinking(
            messages=[
                AgentMessage(
                    role=ModelMessageRoleType.HUMAN,
                    content="Please understand the following task objectives and "
                    f"results and give your judgment:\n"
                    f"Task goal: {task_goal}\n"
                    f"Execution Result: {action_report.content}",
                )
            ],
            prompt=CHECK_RESULT_SYSTEM_MESSAGE,
        )
        # The checker replies with a truthy/falsy verdict string.
        success = str_to_bool(check_result)
        fail_reason = None
        if not success:
            fail_reason = (
                f"Your answer was successfully executed by the agent, but "
                f"the goal cannot be completed yet. Please regenerate based on the "
                f"failure reason:{check_result}"
            )
        return success, fail_reason
async def main():
    """Run the sandbox code assistant on two small calculation tasks."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    context: AgentContext = AgentContext(conv_id="test123")
    # TODO Embedding and Rerank model refactor
    from dbgpt.rag.embedding import OpenAPIEmbeddings

    # Fail fast with a clear message instead of a confusing
    # "NoneType + str" TypeError when the env var is missing.
    api_base = os.getenv("SILICONFLOW_API_BASE")
    if not api_base:
        raise ValueError("SILICONFLOW_API_BASE environment variable is not set")
    silicon_embeddings = OpenAPIEmbeddings(
        api_url=api_base + "/embeddings",
        api_key=os.getenv("SILICONFLOW_API_KEY"),
        model_name="BAAI/bge-large-zh-v1.5",
    )
    # Hybrid memory backed by a Chroma vector store for retrieval.
    agent_memory = AgentMemory(
        HybridMemory[AgentMemoryFragment].from_chroma(
            embeddings=silicon_embeddings,
        )
    )
    agent_memory.gpts_memory.init("test123")
    coder = (
        await SandboxCodeAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(agent_memory)
        .build()
    )
    user_proxy = await UserProxyAgent().bind(context).bind(agent_memory).build()
    # First case: The user asks the agent to calculate 321 * 123
    await user_proxy.initiate_chat(
        recipient=coder,
        reviewer=user_proxy,
        message="计算下321 * 123等于多少",
    )
    # Second case: force a JavaScript code block.
    await user_proxy.initiate_chat(
        recipient=coder,
        reviewer=user_proxy,
        message="Calculate 100 * 99, must use javascript code block",
    )
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
"""Agents: single agents about CodeAssistantAgent?
Examples:
Execute the following command in the terminal:
Set env params.
.. code-block:: shell
export OPENAI_API_KEY=sk-xx
export OPENAI_API_BASE=https://xx:80/v1
run example.
..code-block:: shell
python examples/agents/single_agent_dialogue_example.py
"""
import asyncio
import os
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.code_assistant_agent import CodeAssistantAgent
async def main():
    """Ask a CodeAssistantAgent to solve a task by writing and running code."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    context: AgentContext = AgentContext(conv_id="test123", gpts_app_name="代码助手")
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="test123")
    try:
        coder = (
            await CodeAssistantAgent()
            .bind(context)
            .bind(LLMConfig(llm_client=llm_client))
            .bind(agent_memory)
            .build()
        )
        user_proxy = await UserProxyAgent().bind(context).bind(agent_memory).build()
        await user_proxy.initiate_chat(
            recipient=coder,
            reviewer=user_proxy,
            message="计算下321 * 123等于多少",  # i.e. compute 321 * 123 using python code
            # message="download data from https://raw.githubusercontent.com/uwdata/draco/master/data/cars.csv and plot a visualization that tells us about the relationship between weight and horsepower. Save the plot to a file. Print the fields in a dataset before visualizing it.",
        )
    finally:
        # Always release the conversation memory, even if the chat fails.
        agent_memory.gpts_memory.clear(conv_id="test123")
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
"""Agents: single agents about CodeAssistantAgent?
Examples:
Execute the following command in the terminal:
Set env params.
.. code-block:: shell
export OPENAI_API_KEY=sk-xx
export OPENAI_API_BASE=https://xx:80/v1
run example.
..code-block:: shell
python examples/agents/single_summary_agent_dialogue_example.py
"""
import asyncio
import os
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.summary_assistant_agent import SummaryAssistantAgent
async def summary_example_with_success():
    """Summary example expected to succeed: the content matches the question."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    context: AgentContext = AgentContext(conv_id="summarize")
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="summarize")
    summarizer = (
        await SummaryAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(agent_memory)
        .build()
    )
    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
    # The prompt embeds the source text directly; the agent should extract
    # the advantages of nuclear power from it.
    await user_proxy.initiate_chat(
        recipient=summarizer,
        reviewer=user_proxy,
        message="""I want to summarize advantages of Nuclear Power according to the following content.
Nuclear power in space is the use of nuclear power in outer space, typically either small fission systems or radioactive decay for electricity or heat. Another use is for scientific observation, as in a Mössbauer spectrometer. The most common type is a radioisotope thermoelectric generator, which has been used on many space probes and on crewed lunar missions. Small fission reactors for Earth observation satellites, such as the TOPAZ nuclear reactor, have also been flown.[1] A radioisotope heater unit is powered by radioactive decay and can keep components from becoming too cold to function, potentially over a span of decades.[2]
The United States tested the SNAP-10A nuclear reactor in space for 43 days in 1965,[3] with the next test of a nuclear reactor power system intended for space use occurring on 13 September 2012 with the Demonstration Using Flattop Fission (DUFF) test of the Kilopower reactor.[4]
After a ground-based test of the experimental 1965 Romashka reactor, which used uranium and direct thermoelectric conversion to electricity,[5] the USSR sent about 40 nuclear-electric satellites into space, mostly powered by the BES-5 reactor. The more powerful TOPAZ-II reactor produced 10 kilowatts of electricity.[3]
Examples of concepts that use nuclear power for space propulsion systems include the nuclear electric rocket (nuclear powered ion thruster(s)), the radioisotope rocket, and radioisotope electric propulsion (REP).[6] One of the more explored concepts is the nuclear thermal rocket, which was ground tested in the NERVA program. Nuclear pulse propulsion was the subject of Project Orion.[7]
Regulation and hazard prevention[edit]
After the ban of nuclear weapons in space by the Outer Space Treaty in 1967, nuclear power has been discussed at least since 1972 as a sensitive issue by states.[8] Particularly its potential hazards to Earth's environment and thus also humans has prompted states to adopt in the U.N. General Assembly the Principles Relevant to the Use of Nuclear Power Sources in Outer Space (1992), particularly introducing safety principles for launches and to manage their traffic.[8]
Benefits
Both the Viking 1 and Viking 2 landers used RTGs for power on the surface of Mars. (Viking launch vehicle pictured)
While solar power is much more commonly used, nuclear power can offer advantages in some areas. Solar cells, although efficient, can only supply energy to spacecraft in orbits where the solar flux is sufficiently high, such as low Earth orbit and interplanetary destinations close enough to the Sun. Unlike solar cells, nuclear power systems function independently of sunlight, which is necessary for deep space exploration. Nuclear-based systems can have less mass than solar cells of equivalent power, allowing more compact spacecraft that are easier to orient and direct in space. In the case of crewed spaceflight, nuclear power concepts that can power both life support and propulsion systems may reduce both cost and flight time.[9]
Selected applications and/or technologies for space include:
Radioisotope thermoelectric generator
Radioisotope heater unit
Radioisotope piezoelectric generator
Radioisotope rocket
Nuclear thermal rocket
Nuclear pulse propulsion
Nuclear electric rocket
""",
    )
    # dbgpt-vis message infos
    print(await agent_memory.gpts_memory.app_link_chat_message("summarize"))
async def summary_example_with_failure():
    """Summary example expected to fail: content does not match the question.

    The supplied text is about Taylor Swift while the question asks about
    nuclear power, so the summarizer should report that it cannot answer.
    """
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    context: AgentContext = AgentContext(conv_id="summarize")
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="summarize")
    summarizer = (
        await SummaryAssistantAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(agent_memory)
        .build()
    )
    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
    # Test the failure example
    await user_proxy.initiate_chat(
        recipient=summarizer,
        reviewer=user_proxy,
        message="""I want to summarize advantages of Nuclear Power according to the following content.
Taylor Swift is an American singer-songwriter and actress who is one of the most prominent and successful figures in the music industry. She was born on December 13, 1989, in Reading, Pennsylvania, USA. Taylor Swift gained widespread recognition for her narrative songwriting style, which often draws from her personal experiences and relationships.
Swift's career began in country music, and her self-titled debut album was released in 2006. She quickly became a sensation in the country music scene with hits like "Tim McGraw" and "Teardrops on My Guitar." However, it was her transition to pop music with albums like "Fearless," "Speak Now," and "Red" that catapulted her to international superstardom.
Throughout her career, Taylor Swift has won numerous awards, including multiple Grammy Awards. Her albums consistently top charts, and her songs resonate with a wide audience due to their relatable lyrics and catchy melodies. Some of her most famous songs include "Love Story," "Blank Space," "Shake It Off," "Bad Blood," and "Lover."
Beyond music, Taylor Swift has ventured into acting with roles in movies like "Valentine's Day" and "The Giver." She is also known for her philanthropic efforts and her willingness to use her platform to advocate for various causes.
Taylor Swift is not only a successful artist but also an influential cultural icon known for her evolving musical style, storytelling abilities, and her impact on the entertainment industry.
""",
    )
    print(await agent_memory.gpts_memory.app_link_chat_message("summarize"))


# Backward-compatible alias for the old (misspelled) function name.
summary_example_with_faliure = summary_example_with_failure


if __name__ == "__main__":
    # Banners are colored with ANSI SGR codes and terminated with the
    # reset code (\033[0m) so the terminal color does not leak.
    print(
        "\033[92m=======================Start The Summary Assistant with Successful Results==================\033[0m"
    )
    asyncio.run(summary_example_with_success())
    print(
        "\033[92m=======================The Summary Assistant with Successful Results Ended==================\033[0m\n\n"
    )
    print(
        "\033[91m=======================Start The Summary Assistant with Fail Results==================\033[0m"
    )
    asyncio.run(summary_example_with_failure())
    print(
        "\033[91m=======================The Summary Assistant with Fail Results Ended==================\033[0m"
    )
"""Agents: single agents about CodeAssistantAgent?
Examples:
Execute the following command in the terminal:
Set env params.
.. code-block:: shell
export OPENAI_API_KEY=sk-xx
export OPENAI_API_BASE=https://xx:80/v1
run example.
..code-block:: shell
python examples/agents/single_agent_dialogue_example.py
"""
import asyncio
import os
from dbgpt.agent import AgentContext, AgentMemory, LLMConfig, UserProxyAgent
from dbgpt.agent.expand.data_scientist_agent import DataScientistAgent
from dbgpt.agent.resource import SQLiteDBResource
from dbgpt.configs.model_config import ROOT_PATH
from dbgpt.util.tracer import initialize_tracer
# The SQLite test database lives under <ROOT_PATH>/test_files.
test_plugin_dir = os.path.join(ROOT_PATH, "test_files")
# Record agent trace spans to a local JSONL file for debugging.
initialize_tracer("/tmp/agent_trace.jsonl", create_system_app=True)
async def main():
    """Let a DataScientistAgent answer SQL questions over a SQLite database."""
    from dbgpt.model.proxy.llms.siliconflow import SiliconFlowLLMClient

    llm_client = SiliconFlowLLMClient(
        model_alias=os.getenv(
            "SILICONFLOW_MODEL_VERSION", "Qwen/Qwen2.5-Coder-32B-Instruct"
        ),
    )
    context: AgentContext = AgentContext(conv_id="test456")
    agent_memory = AgentMemory()
    agent_memory.gpts_memory.init(conv_id="test456")
    # SQLite database resource the agent is allowed to query.
    sqlite_resource = SQLiteDBResource("SQLite Database", f"{test_plugin_dir}/dbgpt.db")
    user_proxy = await UserProxyAgent().bind(agent_memory).bind(context).build()
    sql_boy = (
        await DataScientistAgent()
        .bind(context)
        .bind(LLMConfig(llm_client=llm_client))
        .bind(sqlite_resource)
        .bind(agent_memory)
        .build()
    )
    await user_proxy.initiate_chat(
        recipient=sql_boy,
        reviewer=user_proxy,
        message="当前库有那些表",
    )
    ## dbgpt-vis message infos
    print(await agent_memory.gpts_memory.app_link_chat_message("test456"))
# Script entry point: run the async demo.
if __name__ == "__main__":
    asyncio.run(main())
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment