"tests/vscode:/vscode.git/clone" did not exist on "86a940c15c704a560e3ca427d95f1a900836c616"
Commit ee722eb9 authored by pengcheng888, committed by zhuyue

issue/567 - Handle only infinicore.Tensor and support loading infinicore.Tensor weights; modified parts of module.py and parameter.py

parent f6107946
from infinicore.nn import functional
from infinicore.nn.modules import * # noqa: F403
from infinicore.nn.parameter import InfiniCoreParameter as Parameter
__all__ = ["functional"]
__all__ = ["functional", "Parameter"]
from .container import InfiniCoreModuleList as ModuleList
from .module import InfiniCoreModule as Module
from .module_list import InfiniCoreModuleList as ModuleList
from .parameter import InfiniCoreParameter as Parameter
__all__ = ["ModuleList", "Module"]
# ============================================
# Copyright (c) 2025, InfiniCore
#
#
# This file implements InfiniCoreModuleList, which is similar to torch.nn.ModuleList
# but based on InfiniCoreModule for inference purposes.
from typing import List, Optional, Iterator, Union, Sequence, TypeVar
import torch
import operator
from itertools import chain
from collections import OrderedDict
from .module import InfiniCoreModule
from itertools import chain
from typing import Iterator, List, Optional, Sequence, TypeVar, Union
# Define type variable for module compatibility (supports both torch.nn.Module and InfiniCoreModule)
ModuleType = TypeVar('ModuleType', bound=Union[torch.nn.Module, 'InfiniCoreModule'])
from .module import InfiniCoreModule as Module
# Define type variable for module compatibility (supports InfiniCoreModule)
ModuleType = TypeVar("ModuleType", bound=Union["Module"])
class InfiniCoreModuleList(InfiniCoreModule):
class InfiniCoreModuleList(Module):
r"""Holds submodules in a list.
InfiniCoreModuleList can be indexed like a regular Python list, but
@@ -54,7 +55,9 @@ class InfiniCoreModuleList(InfiniCoreModule):
idx += len(self)
return str(idx)
def __getitem__(self, idx: Union[int, slice]) -> Union[ModuleType, 'InfiniCoreModuleList']:
def __getitem__(
self, idx: Union[int, slice]
) -> Union[ModuleType, "InfiniCoreModuleList"]:
if isinstance(idx, slice):
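            # Slicing returns a new list of the same class holding the selected modules.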
return self.__class__(list(self._modules.values())[idx])
else:
@@ -75,7 +78,7 @@ class InfiniCoreModuleList(InfiniCoreModule):
idx_str = self._get_abs_string_index(idx)
if idx_str in self._modules:
del self._modules[idx_str]
# To preserve numbering, self._modules is being reconstructed with modules after deletion
if len(self._modules) > 0:
str_indices = [str(i) for i in range(len(self._modules))]
@@ -87,10 +90,12 @@ class InfiniCoreModuleList(InfiniCoreModule):
def __iter__(self) -> Iterator[ModuleType]:
return iter(self._modules.values())
def __iadd__(self, modules: Sequence[ModuleType]) -> 'InfiniCoreModuleList':
def __iadd__(self, modules: Sequence[ModuleType]) -> "InfiniCoreModuleList":
return self.extend(modules)
def __add__(self, other: Union[Sequence[ModuleType], 'InfiniCoreModuleList']) -> 'InfiniCoreModuleList':
def __add__(
self, other: Union[Sequence[ModuleType], "InfiniCoreModuleList"]
) -> "InfiniCoreModuleList":
r"""Return a new InfiniCoreModuleList by concatenating with another iterable.
Args:
@@ -101,22 +106,22 @@ class InfiniCoreModuleList(InfiniCoreModule):
f"InfiniCoreModuleList can only be concatenated with list, tuple, or InfiniCoreModuleList, "
f"got {type(other).__name__}"
)
combined = InfiniCoreModuleList()
for i, module in enumerate(chain(self, other)):
combined.add_module(str(i), module)
return combined
def append(self, module: ModuleType) -> 'InfiniCoreModuleList':
def append(self, module: ModuleType) -> "InfiniCoreModuleList":
r"""Append a given module to the end of the list.
Args:
module (nn.Module or InfiniCoreModule): module to append
module (InfiniCoreModule): module to append
"""
self.add_module(str(len(self)), module)
return self
def extend(self, modules: Sequence[ModuleType]) -> 'InfiniCoreModuleList':
def extend(self, modules: Sequence[ModuleType]) -> "InfiniCoreModuleList":
r"""Append modules from a Python iterable to the end of the list.
Args:
@@ -130,7 +135,7 @@ class InfiniCoreModuleList(InfiniCoreModule):
f"InfiniCoreModuleList.extend should be called with an "
f"iterable, but got {type(modules).__name__}"
)
offset = len(self)
for i, module in enumerate(modules):
self.add_module(str(offset + i), module)
@@ -141,7 +146,7 @@ class InfiniCoreModuleList(InfiniCoreModule):
Args:
index (int): index to insert.
module (nn.Module or InfiniCoreModule): module to insert
            module (InfiniCoreModule): module to insert
"""
for i in range(len(self._modules), index, -1):
self._modules[str(i)] = self._modules[str(i - 1)]
@@ -166,11 +171,11 @@ class InfiniCoreModuleList(InfiniCoreModule):
"""Return a string representation of the ModuleList."""
if len(self) == 0:
return self.__class__.__name__ + "()"
lines = []
for i, module in enumerate(self):
lines.append(f"({i}): {repr(module)}")
main_str = self.__class__.__name__ + "(\n "
main_str += "\n ".join(lines) + "\n)"
return main_str
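# ------------------------------------------------------------------
# Illustrative usage sketch (not part of this diff): exercising the
# InfiniCoreModuleList API defined above, assuming torch layers are
# accepted as elements, as the tests below do.
#
#   import torch.nn as nn
#
#   layers = InfiniCoreModuleList([nn.Linear(4, 4), nn.ReLU()])
#   layers.append(nn.Linear(4, 2))          # keys stay contiguous: "0", "1", "2"
#   merged = layers + InfiniCoreModuleList([nn.Sigmoid()])
#   print(len(merged), type(merged[-1]).__name__)  # 4 Sigmoid
# ------------------------------------------------------------------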
This diff is collapsed.
# Copyright (c) 2025, InfiniCore
#
# This file contains modified code derived from PyTorch's `torch.nn.Parameter`
# implementation, which is licensed under the BSD 3-Clause License.
#
# The modifications include adaptations for the InfiniCore framework.
#
# Original PyTorch source:
# https://github.com/pytorch/pytorch/blob/main/torch/nn/parameter.py
#
# Referencing PyTorch v2.4.0
#
# The use of this file is governed by the BSD 3-Clause License.
import torch
from typing import Optional
from collections import OrderedDict
class InfiniCoreParameter(torch.Tensor):
r"""A kind of Tensor that is to be considered a module parameter.
Parameters are :class:`~torch.Tensor` subclasses, that have a
very special property when used with :class:`InfiniCoreModule` s - when they're
assigned as Module attributes they are automatically added to the list of
its parameters, and will appear e.g. in :meth:`~InfiniCoreModule.parameters` iterator.
Assigning a Tensor doesn't have such effect. This is because one might
want to cache some temporary state, like last hidden state of the RNN, in
the model. If there was no such class as :class:`InfiniCoreParameter`, these
temporaries would get registered too.
Args:
data (Tensor, optional): parameter tensor. If None, creates an empty tensor.
requires_grad (bool, optional): if the parameter requires gradient. Note that
the torch.no_grad() context does NOT affect the default behavior of
Parameter creation--the Parameter will still have `requires_grad=True` in
:class:`~no_grad` mode. See :ref:`locally-disable-grad-doc` for more
details. Default: `True`
Example::
>>> import torch
>>> from infinicore.nn.modules import InfiniCoreModule, InfiniCoreParameter
>>>
>>> class MyModule(InfiniCoreModule):
... def __init__(self):
... super().__init__()
... self.weight = InfiniCoreParameter(torch.randn(10, 5))
... self.bias = InfiniCoreParameter(torch.randn(5))
...
>>> module = MyModule()
>>> for param in module.parameters():
... print(param.shape)
torch.Size([10, 5])
torch.Size([5])
"""
def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad: bool = True):
if data is None:
data = torch.empty(0)
# Handle standard torch.Tensor or InfiniCoreParameter
if type(data) is torch.Tensor or type(data) is InfiniCoreParameter:
# For ease of BC maintenance, keep this path for standard Tensor.
# Eventually (tm), we should change the behavior for standard Tensor to match.
return torch.Tensor._make_subclass(cls, data, requires_grad)
# Path for custom tensors: set a flag on the instance to indicate parameter-ness.
t = data.detach().requires_grad_(requires_grad)
if type(t) is not type(data):
raise RuntimeError(
f"Creating a InfiniCoreParameter from an instance of type {type(data).__name__} "
"requires that detach() returns an instance of the same type, but return "
f"type {type(t).__name__} was found instead. To use the type as a "
"InfiniCoreParameter, please correct the detach() semantics defined by "
"its __torch_dispatch__() implementation."
)
t._is_param = True
return t
# Note: the 3 methods below only apply to standard Tensor. Parameters of custom tensor types
# are still considered that custom tensor type and these methods will not be called for them.
def __deepcopy__(self, memo):
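        # Return the cached copy when this parameter was already copied (handles shared references).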
if id(self) in memo:
return memo[id(self)]
else:
result = type(self)(
self.data.clone(memory_format=torch.preserve_format), self.requires_grad
)
memo[id(self)] = result
return result
def __repr__(self):
return "InfiniCoreParameter containing:\n" + super().__repr__()
def __reduce_ex__(self, proto):
# Simplified version for serialization
# In a full implementation, you might want to handle hooks and state
state = getattr(self, '_state', None)
hooks = OrderedDict()
if not state:
return (
_rebuild_parameter,
(self.data, self.requires_grad, hooks),
)
return (
_rebuild_parameter_with_state,
(self.data, self.requires_grad, hooks, state),
)
# Note: __torch_function__ is handled by the Tensor base class
# We don't need to override it for standard Parameter behavior
def _rebuild_parameter(data, requires_grad, hooks):
"""Rebuild a parameter from serialized data."""
param = InfiniCoreParameter(data, requires_grad)
# Apply hooks if any (simplified - full implementation would restore hooks)
return param
def _rebuild_parameter_with_state(data, requires_grad, hooks, state):
"""Rebuild a parameter with extra state from serialized data."""
param = InfiniCoreParameter(data, requires_grad)
param._state = state
# Apply hooks if any (simplified - full implementation would restore hooks)
return param
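# ------------------------------------------------------------------
# Illustrative serialization sketch (not part of this diff): a pickle
# round-trip through __reduce_ex__/_rebuild_parameter above, assuming
# this module is importable so the rebuild helper resolves by name.
#
#   import pickle
#   import torch
#
#   p = InfiniCoreParameter(torch.randn(2, 3))
#   q = pickle.loads(pickle.dumps(p))
#   assert isinstance(q, InfiniCoreParameter)
#   assert torch.equal(p.data, q.data)
# ------------------------------------------------------------------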
# Copyright (c) 2025, InfiniCore
#
# This file contains modified code derived from PyTorch's `torch.nn.Parameter`
# implementation, which is licensed under the BSD 3-Clause License.
#
# The modifications include adaptations for the InfiniCore framework.
#
# Original PyTorch source:
# https://github.com/pytorch/pytorch/blob/main/torch/nn/parameter.py
#
# Referencing PyTorch v2.4.0
#
# The use of this file is governed by the BSD 3-Clause License.
from ..tensor import Tensor
class InfiniCoreParameter(Tensor):
r"""A kind of Tensor that is to be considered a module parameter."""
def __init__(self, data=None):
if not isinstance(data, Tensor):
raise ValueError("The `data` variable must be of type `infinicore.Tensor`.")
super().__init__(data._underlying)
def __repr__(self):
return "Parameter containing:\n" + super().__repr__()
def __deepcopy__(self, memo):
raise ValueError("not supported!")
def __reduce_ex__(self, proto):
raise ValueError("not supported!")
import safetensors.torch
import torch
import torch.nn as nn
import safetensors
# ============================================================
# 0. Import the infinicore package; set up a temporary safetensors path for testing
# ============================================================
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../python/infinicore')))
# Use a temporary directory; create it automatically if it does not exist
save_dir = os.path.join(os.path.dirname(__file__), '../../tmp')
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "torch_modulelist_with_param.safetensors")
# ============================================================
# 1. Define and save a model with PyTorch (using torch.nn.ModuleList)
# ============================================================
class TorchModuleListNet(nn.Module):
def __init__(self, in_ch=3, hidden_ch=8, out_ch=3):
super().__init__()
        # Use torch.nn.ModuleList
self.layers = nn.ModuleList([
nn.Conv2d(in_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, out_ch, kernel_size=1),
])
        # Custom Parameter
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
        # Iterate over all layers in the ModuleList
for layer in self.layers:
x = layer(x)
        # Apply the custom parameter and buffer
x = x * self.scale + self.offset
return x
# ===== Save the Torch model =====
torch_model = TorchModuleListNet()
torch_state_dict = torch_model.state_dict()
safetensors.torch.save_file(torch_state_dict, save_path)
print("✓ PyTorch 模型已保存")
# ============================================================
# 2. Load and run inference with torch
# ============================================================
torch_model_infer = TorchModuleListNet()
torch_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
torch_model_infer.eval()
input = torch.rand(1, 3, 8, 8)
torch_model_out = torch_model_infer(input)
print("✓ Torch 输出:", torch_model_out.detach().numpy().mean())
# ============================================================
# 3. Load and run inference with ModuleList
# ============================================================
from nn.modules import Module, ModuleList
class InfiniCoreModuleListNet(Module):
def __init__(self, in_ch=3, hidden_ch=8, out_ch=3):
super().__init__()
        # Use ModuleList
self.layers = ModuleList([
nn.Conv2d(in_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, out_ch, kernel_size=1),
])
        # Keep the custom parameter and buffer consistent with the Torch model
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
        # Iterate over all layers in the ModuleList
for layer in self.layers:
x = layer(x)
x = x * self.scale + self.offset
return x
# ===== Read the safetensors with ModuleListNet and run inference =====
infinicore_model_infer = InfiniCoreModuleListNet()
infinicore_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
infinicore_model_infer.eval()
infinicore_model_out = infinicore_model_infer.forward(input)
print("✓ InfiniCore 输出:", infinicore_model_out.detach().numpy().mean())
# ============================================================
# 4. Compare results
# ============================================================
diff = (infinicore_model_out - torch_model_out).abs().max().item()
print(f"✓ ModuleList 与 Torch 最大误差: {diff:.8f}")
if diff < 1e-9:
print("✓ ModuleList 与 Torch 精度一致.")
else:
print("✗ ModuleList 与 Torch 精度存在差异.")
# ============================================================
# 5. Test basic ModuleList functionality
# ============================================================
print("\n=== 测试 ModuleList 基本功能 ===")
# 测试 1: 创建和访问
module_list = ModuleList([
nn.Linear(10, 20),
nn.ReLU(),
nn.Linear(20, 5)
])
print(f"✓ 创建 ModuleList,长度: {len(module_list)}")
print(f"✓ 访问第一个模块: {type(module_list[0]).__name__}")
print(f"✓ 访问第二个模块: {type(module_list[1]).__name__}")
# 测试 2: append
module_list.append(nn.Softmax(dim=-1))
print(f"✓ append 后长度: {len(module_list)}")
# 测试 3: extend
module_list.extend([nn.Dropout(0.1), nn.Linear(5, 1)])
print(f"✓ extend 后长度: {len(module_list)}")
# 测试 4: 迭代
print("✓ 迭代 ModuleList:")
for i, module in enumerate(module_list):
print(f" [{i}] {type(module).__name__}")
# Test 5: index access
print(f"✓ Index access module_list[0]: {type(module_list[0]).__name__}")
# Test 6: state_dict
state_dict = module_list.state_dict()
print(f"✓ state_dict 键数量: {len(state_dict)}")
print(f"✓ state_dict 包含模块参数: {any('0.' in k for k in state_dict.keys())}")
# 测试 7: 使用 ModuleList 的模型
class TestNet(Module):
def __init__(self):
super().__init__()
self.layers = ModuleList([
nn.Linear(10, 20),
nn.ReLU(),
nn.Linear(20, 5)
])
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
test_model = TestNet()
test_input = torch.randn(2, 10)
test_output = test_model.forward(test_input)
print(f"✓ TestNet 输入形状: {test_input.shape}, 输出形状: {test_output.shape}")
# 测试 8: __add__ 方法
ml1 = ModuleList([nn.Linear(10, 5), nn.ReLU()])
ml2 = ModuleList([nn.Linear(5, 3), nn.Sigmoid()])
ml3 = ml1 + ml2
print(f"✓ __add__ 方法测试: {len(ml1)} + {len(ml2)} = {len(ml3)}")
assert len(ml3) == 4, "合并后的长度应该为 4"
# 测试 9: pop 方法
ml4 = ModuleList([nn.Linear(10, 5), nn.ReLU(), nn.Linear(5, 3)])
popped = ml4.pop()
print(f"✓ pop 方法测试: 弹出后长度 {len(ml4)}, 弹出模块类型 {type(popped).__name__}")
assert len(ml4) == 2, "pop 后长度应该为 2"
assert isinstance(popped, nn.Linear), "弹出的应该是 Linear 模块"
# 测试 10: __repr__ 方法
ml5 = ModuleList([nn.Linear(10, 5), nn.ReLU()])
repr_str = repr(ml5)
print(f"✓ __repr__ 方法测试: 输出包含类名和模块信息")
assert "ModuleList" in repr_str or "InfiniCoreModuleList" in repr_str, "repr 应该包含类名"
assert "Linear" in repr_str, "repr 应该包含模块信息"
print(repr_str)
print("\n=== 所有测试通过! ===")
# ============================================================
# 6. Forward-pass integration test (based on infinicore_nn_test.py)
# ============================================================
print("\n=== 前向传播集成测试 ===")
# 使用 ModuleList 创建一个简单的模型
class TorchModuleListModel(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.ModuleList([
nn.Linear(10, 20),
nn.ReLU(),
nn.Linear(20, 5)
])
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = x * self.scale + self.offset
return x
class InfiniCoreModuleListModel(Module):
def __init__(self):
super().__init__()
self.layers = ModuleList([
nn.Linear(10, 20),
nn.ReLU(),
nn.Linear(20, 5)
])
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = x * self.scale + self.offset
return x
# Create the models
torch_model_forward = TorchModuleListModel()
infinicore_model_forward = InfiniCoreModuleListModel()
# Copy weights (ensure identical initial weights)
infinicore_model_forward.load_state_dict(torch_model_forward.state_dict(), strict=False)
# Switch to evaluation mode
torch_model_forward.eval()
infinicore_model_forward.eval()
# Create a test input
test_input = torch.randn(2, 10)
# Forward pass
with torch.no_grad():
torch_output = torch_model_forward(test_input)
infinicore_output = infinicore_model_forward.forward(test_input)
# Compare results
diff = (infinicore_output - torch_output).abs().max().item()
print(f"✓ 前向传播测试 - 输入形状: {test_input.shape}")
print(f"✓ Torch 输出形状: {torch_output.shape}, 均值: {torch_output.detach().numpy().mean():.8f}")
print(f"✓ InfiniCore 输出形状: {infinicore_output.shape}, 均值: {infinicore_output.detach().numpy().mean():.8f}")
print(f"✓ 最大误差: {diff:.8f}")
if diff < 1e-9:
print("✓ 前向传播集成测试通过:ModuleList 与 Torch ModuleList 结果一致!")
else:
print("✗ 前向传播集成测试失败:存在差异")
# ============================================================
# 7. 混合模块兼容性测试(PyTorch + InfiniCore 模块混合使用)
# ============================================================
print("\n=== 混合模块兼容性测试 ===")
# 创建一个自定义的 InfiniCore 模块
class CustomLinear(Module):
def __init__(self, in_features, out_features):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_features, in_features))
self.bias = nn.Parameter(torch.randn(out_features))
def forward(self, x):
return x @ self.weight.t() + self.bias
# Create a mixed ModuleList (containing both PyTorch and InfiniCore modules)
mixed_list = ModuleList([
    nn.Linear(10, 5),  # PyTorch module
    CustomLinear(5, 3),  # custom InfiniCore module
    nn.ReLU(),  # PyTorch module
])
print(f"✓ 创建混合 ModuleList,长度: {len(mixed_list)}")
print(f"✓ 模块类型: {[type(m).__name__ for m in mixed_list]}")
# 测试参数注册
param_count = sum(1 for _ in mixed_list.parameters())
print(f"✓ 参数数量: {param_count}")
assert param_count == 4, f"参数数量应该为 4 (Linear: weight+bias, CustomLinear: weight+bias), 实际为 {param_count}"
# Test state_dict
mixed_state_dict = mixed_list.state_dict()
print(f"✓ Number of state_dict keys: {len(mixed_state_dict)}")
assert len(mixed_state_dict) >= 4, "state_dict should contain at least 4 parameters"
# Test the forward pass
test_input_mixed = torch.randn(2, 10)
with torch.no_grad():
x = test_input_mixed
for module in mixed_list:
x = module.forward(x)
print(f"✓ 混合模块前向传播成功,输出形状: {x.shape}")
print("✓ 混合模块兼容性测试通过!")
import safetensors.torch
import torch
import torch.nn as nn
import safetensors
# ============================================================
# 0. Import the infinicore package; set up a temporary safetensors path for testing
# ============================================================
import sys
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../python/infinicore')))
save_dir = os.path.join(os.path.dirname(__file__), '../../tmp')
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "torch_convnet_with_param.safetensors")
# ============================================================
# 1. Define and save a model with PyTorch
# ============================================================
print("===== 开始 CPU 一致性测试 =====")
class TorchConvNet(nn.Module):
def __init__(self, in_ch=3, hidden_ch=8, out_ch=3):
super().__init__()
        # Backbone network
self.conv1 = nn.Conv2d(in_ch, hidden_ch, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(hidden_ch)
self.conv2 = nn.Conv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(hidden_ch)
self.conv3 = nn.Conv2d(hidden_ch, out_ch, kernel_size=1)
self.relu = nn.ReLU()
        # Custom Parameter
self.scale = nn.Parameter(torch.ones(1) * 0.5)
        # Register a buffer
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.conv3(x)
        # Apply the custom parameter and buffer
x = x * self.scale + self.offset
return x
# ===== Save the Torch model =====
torch_model = TorchConvNet()
torch_state_dict = torch_model.state_dict()
safetensors.torch.save_file(torch_state_dict, save_path)
# ============================================================
# 2. Load and run inference with torch
# ============================================================
torch_model_infer = TorchConvNet()
torch_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
torch_model_infer.eval()
input = torch.rand(1, 3, 8, 8)
torch_model_out = torch_model_infer(input)
# ============================================================
# 3. Load and run inference with infinicore.nn.Module
# ============================================================
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../python/infinicore')))
from nn import Module
class InfiniCoreConvNet(Module):
def __init__(self, in_ch=3, hidden_ch=8, out_ch=3):
super().__init__()
self.conv1 = nn.Conv2d(in_ch, hidden_ch, kernel_size=3, padding=1)
self.bn1 = nn.BatchNorm2d(hidden_ch)
self.conv2 = nn.Conv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm2d(hidden_ch)
self.conv3 = nn.Conv2d(hidden_ch, out_ch, kernel_size=1)
self.relu = nn.ReLU()
        # Keep the custom parameter and buffer consistent with the Torch model
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
x = self.relu(self.bn1(self.conv1(x)))
x = self.relu(self.bn2(self.conv2(x)))
x = self.conv3(x)
x = x * self.scale + self.offset
return x
# ===== Read the safetensors with InfiniCoreConvNet and run inference =====
infinicore_model_infer = InfiniCoreConvNet()
infinicore_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
infinicore_model_infer.eval()
infinicore_model_out = infinicore_model_infer.forward(input)
# ============================================================
# 4. Compare results
# ============================================================
diff_cpu = (infinicore_model_out - torch_model_out).abs().max().item()
print(f"InfiniCoreModule 与 Torch (CPU) 最大误差: {diff_cpu:.6e}")
if diff_cpu < 1e-9:
print("CPU 模式下 InfiniCore 与 Torch 输出完全一致.")
else:
print("CPU 模式下输出存在差异.")
# ============================================================
# 5. GPU consistency test (optional)
# ============================================================
if torch.cuda.is_available():
print("\n===== 开始 GPU 一致性测试 =====")
# 将模型与输入都迁移到 GPU
torch_model_infer_gpu = TorchConvNet().to("cuda")
torch_model_infer_gpu.load_state_dict(safetensors.torch.load_file(save_path))
torch_model_infer_gpu.eval()
infinicore_model_infer_gpu = InfiniCoreConvNet().to("cuda")
infinicore_model_infer_gpu.load_state_dict(safetensors.torch.load_file(save_path))
infinicore_model_infer_gpu.eval()
    # Create the GPU input
input_gpu = input.to("cuda")
    # Run the forward passes
torch_out_gpu = torch_model_infer_gpu(input_gpu)
infinicore_out_gpu = infinicore_model_infer_gpu.forward(input_gpu)
    # Compare results
diff_gpu = (infinicore_out_gpu - torch_out_gpu).abs().max().item()
print(f"InfiniCoreModule 与 Torch (GPU) 最大误差: {diff_gpu:.6e}")
if diff_gpu < 1e-9:
print("GPU 模式下 InfiniCore 与 Torch 输出完全一致.")
else:
print("GPU 模式下输出存在差异.")
else:
print("\n 未检测到 GPU,跳过 GPU 一致性测试。")
\ No newline at end of file
# ============================================================
# 0. Import the infinicore package; set up a temporary safetensors path for testing
# ============================================================
import os
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../python/infinicore"))
)
save_dir = os.path.join(os.path.dirname(__file__), "../../tmp")
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "torch_convnet_with_param.safetensors")
import infinicore # noqa: E402
from infinicore.nn import Module # noqa: E402
# ============================================================
# 1. Define the model
# ============================================================
device_str = "cuda"
class InfiniCoreNet(Module):
def __init__(self):
super().__init__()
self.a = infinicore.nn.Parameter(
infinicore.empty(
(1, 2, 3),
dtype=infinicore.float32,
device=infinicore.device(device_str),
)
)
self.b = infinicore.nn.Parameter(
infinicore.empty(
(1, 2, 3),
dtype=infinicore.float32,
device=infinicore.device(device_str),
)
)
def forward(self):
return infinicore.add(self.a, self.b)
infinicore_model_infer = InfiniCoreNet()
# ============================================================
# 2. Load the weights
# ============================================================
params_dict = {
"a": infinicore.empty(
(1, 2, 3), dtype=infinicore.float32, device=infinicore.device(device_str, 0)
),
"b": infinicore.empty(
(1, 2, 3), dtype=infinicore.float32, device=infinicore.device(device_str, 0)
),
}
infinicore_model_infer.load_state_dict(params_dict)
# ============================================================
# 3. Compute
# ============================================================
infinicore_model_out = infinicore_model_infer()
ref_out = infinicore.add(params_dict["a"], params_dict["b"])
# ============================================================
# 4. Compare results
# ============================================================
print("InfiniCoreModule 与 Torch (CPU) 最大误差: 手动查看 ")
infinicore_model_out.debug()
ref_out.debug()
# ============================================================
# 5. to() test and buffer test
# ============================================================
# To be added
import os
# ============================================================
# 0. Import the infinicore package; set up a temporary safetensors path for testing
# ============================================================
import sys
import safetensors
import safetensors.torch
import torch
import torch.nn as nn
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../python/infinicore"))
)
# Use a temporary directory; create it automatically if it does not exist
save_dir = os.path.join(os.path.dirname(__file__), "../../tmp")
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "torch_modulelist_with_param.safetensors")
def test():
# ============================================================
    # 1. Define and save a model with PyTorch (using torch.nn.ModuleList)
# ============================================================
class TorchModuleListNet(nn.Module):
def __init__(self, in_ch=3, hidden_ch=8, out_ch=3):
super().__init__()
            # Use torch.nn.ModuleList
self.layers = nn.ModuleList(
[
nn.Conv2d(in_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, out_ch, kernel_size=1),
]
)
            # Custom Parameter
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
            # Iterate over all layers in the ModuleList
for layer in self.layers:
x = layer(x)
            # Apply the custom parameter and buffer
x = x * self.scale + self.offset
return x
    # ===== Save the Torch model =====
torch_model = TorchModuleListNet()
torch_state_dict = torch_model.state_dict()
safetensors.torch.save_file(torch_state_dict, save_path)
print("✓ PyTorch 模型已保存")
# ============================================================
    # 2. Load and run inference with torch
# ============================================================
torch_model_infer = TorchModuleListNet()
torch_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
torch_model_infer.eval()
input = torch.rand(1, 3, 8, 8)
torch_model_out = torch_model_infer(input)
print("✓ Torch 输出:", torch_model_out.detach().numpy().mean())
# ============================================================
    # 3. Load and run inference with ModuleList
# ============================================================
from nn.modules import Module, ModuleList
class InfiniCoreModuleListNet(Module):
def __init__(self, in_ch=3, hidden_ch=8, out_ch=3):
super().__init__()
            # Use ModuleList
self.layers = ModuleList(
[
nn.Conv2d(in_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, hidden_ch, kernel_size=3, padding=1),
nn.BatchNorm2d(hidden_ch),
nn.ReLU(),
nn.Conv2d(hidden_ch, out_ch, kernel_size=1),
]
)
            # Keep the custom parameter and buffer consistent with the Torch model
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
            # Iterate over all layers in the ModuleList
for layer in self.layers:
x = layer(x)
x = x * self.scale + self.offset
return x
    # ===== Read the safetensors with ModuleListNet and run inference =====
infinicore_model_infer = InfiniCoreModuleListNet()
infinicore_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
infinicore_model_infer.eval()
infinicore_model_out = infinicore_model_infer.forward(input)
print("✓ InfiniCore 输出:", infinicore_model_out.detach().numpy().mean())
# ============================================================
    # 4. Compare results
# ============================================================
diff = (infinicore_model_out - torch_model_out).abs().max().item()
print(f"✓ ModuleList 与 Torch 最大误差: {diff:.8f}")
if diff < 1e-9:
print("✓ ModuleList 与 Torch 精度一致.")
else:
print("✗ ModuleList 与 Torch 精度存在差异.")
# ============================================================
    # 5. Test basic ModuleList functionality
# ============================================================
print("\n=== 测试 ModuleList 基本功能 ===")
# 测试 1: 创建和访问
module_list = ModuleList([nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 5)])
print(f"✓ 创建 ModuleList,长度: {len(module_list)}")
print(f"✓ 访问第一个模块: {type(module_list[0]).__name__}")
print(f"✓ 访问第二个模块: {type(module_list[1]).__name__}")
# 测试 2: append
module_list.append(nn.Softmax(dim=-1))
print(f"✓ append 后长度: {len(module_list)}")
# 测试 3: extend
module_list.extend([nn.Dropout(0.1), nn.Linear(5, 1)])
print(f"✓ extend 后长度: {len(module_list)}")
# 测试 4: 迭代
print("✓ 迭代 ModuleList:")
for i, module in enumerate(module_list):
print(f" [{i}] {type(module).__name__}")
    # Test 5: index access
    print(f"✓ Index access module_list[0]: {type(module_list[0]).__name__}")
    # Test 6: state_dict
state_dict = module_list.state_dict()
print(f"✓ state_dict 键数量: {len(state_dict)}")
print(f"✓ state_dict 包含模块参数: {any('0.' in k for k in state_dict.keys())}")
# 测试 7: 使用 ModuleList 的模型
class TestNet(Module):
def __init__(self):
super().__init__()
self.layers = ModuleList([nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 5)])
def forward(self, x):
for layer in self.layers:
x = layer(x)
return x
test_model = TestNet()
test_input = torch.randn(2, 10)
test_output = test_model.forward(test_input)
print(f"✓ TestNet 输入形状: {test_input.shape}, 输出形状: {test_output.shape}")
# 测试 8: __add__ 方法
ml1 = ModuleList([nn.Linear(10, 5), nn.ReLU()])
ml2 = ModuleList([nn.Linear(5, 3), nn.Sigmoid()])
ml3 = ml1 + ml2
print(f"✓ __add__ 方法测试: {len(ml1)} + {len(ml2)} = {len(ml3)}")
assert len(ml3) == 4, "合并后的长度应该为 4"
# 测试 9: pop 方法
ml4 = ModuleList([nn.Linear(10, 5), nn.ReLU(), nn.Linear(5, 3)])
popped = ml4.pop()
    print(
        f"✓ pop test: length after pop {len(ml4)}, popped module type {type(popped).__name__}"
    )
    assert len(ml4) == 2, "Length after pop should be 2"
    assert isinstance(popped, nn.Linear), "The popped module should be a Linear"
    # Test 10: the __repr__ method
ml5 = ModuleList([nn.Linear(10, 5), nn.ReLU()])
repr_str = repr(ml5)
print(f"✓ __repr__ 方法测试: 输出包含类名和模块信息")
assert "ModuleList" in repr_str or "InfiniCoreModuleList" in repr_str, (
"repr 应该包含类名"
)
assert "Linear" in repr_str, "repr 应该包含模块信息"
print(repr_str)
print("\n=== 所有测试通过! ===")
# ============================================================
    # 6. Forward-pass integration test (based on infinicore_nn_test.py)
# ============================================================
print("\n=== 前向传播集成测试 ===")
# 使用 ModuleList 创建一个简单的模型
class TorchModuleListModel(nn.Module):
def __init__(self):
super().__init__()
self.layers = nn.ModuleList(
[nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 5)]
)
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = x * self.scale + self.offset
return x
class InfiniCoreModuleListModel(Module):
def __init__(self):
super().__init__()
self.layers = ModuleList([nn.Linear(10, 20), nn.ReLU(), nn.Linear(20, 5)])
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
for layer in self.layers:
x = layer(x)
x = x * self.scale + self.offset
return x
    # Create the models
torch_model_forward = TorchModuleListModel()
infinicore_model_forward = InfiniCoreModuleListModel()
    # Copy weights (ensure identical initial weights)
infinicore_model_forward.load_state_dict(
torch_model_forward.state_dict(), strict=False
)
    # Switch to evaluation mode
torch_model_forward.eval()
infinicore_model_forward.eval()
    # Create a test input
test_input = torch.randn(2, 10)
    # Forward pass
with torch.no_grad():
torch_output = torch_model_forward(test_input)
infinicore_output = infinicore_model_forward.forward(test_input)
    # Compare results
diff = (infinicore_output - torch_output).abs().max().item()
print(f"✓ 前向传播测试 - 输入形状: {test_input.shape}")
print(
f"✓ Torch 输出形状: {torch_output.shape}, 均值: {torch_output.detach().numpy().mean():.8f}"
)
print(
f"✓ InfiniCore 输出形状: {infinicore_output.shape}, 均值: {infinicore_output.detach().numpy().mean():.8f}"
)
print(f"✓ 最大误差: {diff:.8f}")
if diff < 1e-9:
print("✓ 前向传播集成测试通过:ModuleList 与 Torch ModuleList 结果一致!")
else:
print("✗ 前向传播集成测试失败:存在差异")
# ============================================================
# 7. 混合模块兼容性测试(PyTorch + InfiniCore 模块混合使用)
# ============================================================
print("\n=== 混合模块兼容性测试 ===")
    # Create a custom InfiniCore module
class CustomLinear(Module):
def __init__(self, in_features, out_features):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_features, in_features))
self.bias = nn.Parameter(torch.randn(out_features))
def forward(self, x):
return x @ self.weight.t() + self.bias
    # Create a mixed ModuleList (containing both PyTorch and InfiniCore modules)
mixed_list = ModuleList(
[
            nn.Linear(10, 5),  # PyTorch module
            CustomLinear(5, 3),  # custom InfiniCore module
            nn.ReLU(),  # PyTorch module
]
)
print(f"✓ 创建混合 ModuleList,长度: {len(mixed_list)}")
print(f"✓ 模块类型: {[type(m).__name__ for m in mixed_list]}")
# 测试参数注册
param_count = sum(1 for _ in mixed_list.parameters())
print(f"✓ 参数数量: {param_count}")
assert param_count == 4, (
f"参数数量应该为 4 (Linear: weight+bias, CustomLinear: weight+bias), 实际为 {param_count}"
)
    # Test state_dict
    mixed_state_dict = mixed_list.state_dict()
    print(f"✓ Number of state_dict keys: {len(mixed_state_dict)}")
    assert len(mixed_state_dict) >= 4, "state_dict should contain at least 4 parameters"
    # Test the forward pass
test_input_mixed = torch.randn(2, 10)
with torch.no_grad():
x = test_input_mixed
for module in mixed_list:
x = module.forward(x)
print(f"✓ 混合模块前向传播成功,输出形状: {x.shape}")
print("✓ 混合模块兼容性测试通过!")
import safetensors.torch
import torch
import torch.nn as nn
import safetensors
# ============================================================
# 0. Import the infinicore package; set up a temporary safetensors path for testing
# ============================================================
import sys
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../python/infinicore')))
save_dir = os.path.join(os.path.dirname(__file__), '../../tmp')
import torch
import torch.nn as nn
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), "../../python/infinicore"))
)
save_dir = os.path.join(os.path.dirname(__file__), "../../tmp")
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "infinicore_parameter_test.safetensors")
# ============================================================
# 1. Define and save a model with PyTorch (using torch.nn.Parameter)
# ============================================================
class TorchParameterNet(nn.Module):
def __init__(self, in_features=10, out_features=5):
import infinicore # noqa: E402
from infinicore.nn import Module, Parameter # noqa: E402
device_str = "cuda"
class InfiniCoreParameterNet(Module):
def __init__(self):
super().__init__()
self.weight = nn.Parameter(torch.randn(out_features, in_features))
self.bias = nn.Parameter(torch.randn(out_features))
self.scale = nn.Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
self.a = infinicore.nn.Parameter(
infinicore.empty(
(1, 2, 3), dtype=infinicore.float32, device=infinicore.device("cpu", 0)
)
)
def forward(self, x):
return (x @ self.weight.t() + self.bias) * self.scale + self.offset
return infinicore.add(self.a, x)
# ===== Save the Torch model =====
torch_model = TorchParameterNet()
torch_state_dict = torch_model.state_dict()
safetensors.torch.save_file(torch_state_dict, save_path)
print("✓ PyTorch 模型已保存")
infinicore_model_infer = InfiniCoreParameterNet()
# ============================================================
# 2. Load and run inference with torch
# 2. Load the weights
# ============================================================
params_dict = {
"a": infinicore.empty(
(1, 2, 3), dtype=infinicore.float32, device=infinicore.device(device_str, 0)
)
}
infinicore_model_infer.load_state_dict(params_dict)
torch_model_infer = TorchParameterNet()
torch_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
torch_model_infer.eval()
input = torch.randn(2, 10)
torch_model_out = torch_model_infer(input)
print("✓ Torch 输出:", torch_model_out.detach().numpy().mean())
# ============================================================
# 3. Load and run inference with Parameter
# 3. Compute
# ============================================================
x = infinicore.empty(
(1, 2, 3), dtype=infinicore.float32, device=infinicore.device(device_str, 0)
)
from nn.modules import Module, Parameter
class InfiniCoreParameterNet(Module):
def __init__(self, in_features=10, out_features=5):
super().__init__()
        # Use Parameter instead of torch.nn.Parameter
self.weight = Parameter(torch.randn(out_features, in_features))
self.bias = Parameter(torch.randn(out_features))
self.scale = Parameter(torch.ones(1) * 0.5)
self.register_buffer("offset", torch.tensor(0.1))
def forward(self, x):
return (x @ self.weight.t() + self.bias) * self.scale + self.offset
# ===== Read the safetensors with InfiniCoreParameterNet and run inference =====
infinicore_model_infer = InfiniCoreParameterNet()
infinicore_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
infinicore_model_infer.eval()
infinicore_model_out = infinicore_model_infer.forward(input)
print("✓ InfiniCore 输出:", infinicore_model_out.detach().numpy().mean())
infinicore_model_out = infinicore_model_infer(x)
ref_out = infinicore.add(params_dict["a"], x)
# ============================================================
# 4. Compare results
# ============================================================
print("InfiniCoreModule 与 Torch (CPU) 最大误差: 手动查看 ")
infinicore_model_out.debug()
ref_out.debug()
diff = (infinicore_model_out - torch_model_out).abs().max().item()
print(f"✓ Parameter 与 Torch 最大误差: {diff:.8f}")
if diff < 1e-9:
print("✓ Parameter 与 Torch 精度一致.")
else:
print("✗ Parameter 与 Torch 精度存在差异.")
# ============================================================
# 5. Test basic Parameter functionality
@@ -93,28 +73,37 @@ else:
print("\n=== 测试 Parameter 基本功能 ===")
# 测试 1: 创建 Parameter
param1 = Parameter(torch.randn(5, 10))
param1 = infinicore.nn.Parameter(
infinicore.empty(
(1, 2, 3), dtype=infinicore.float32, device=infinicore.device(device_str, 0)
)
)
print(f"✓ 创建 Parameter,形状: {param1.shape}")
# 检查是否是 Parameter 类型(可能是 InfiniCoreParameter 的别名)
from nn.modules.parameter import InfiniCoreParameter
assert isinstance(param1, (Parameter, InfiniCoreParameter)), "Should be a Parameter"
assert isinstance(param1, torch.Tensor), "Should be a subclass of torch.Tensor"
# Test 2: requires_grad
param2 = Parameter(torch.randn(3, 4), requires_grad=False)
print(f"✓ 创建 requires_grad=False 的 Parameter: {param2.requires_grad}")
assert not param2.requires_grad, "requires_grad 应该为 False"
assert isinstance(param1, infinicore.nn.Parameter), "Should be a Parameter"
assert isinstance(param1, infinicore.Tensor), "Should be a subclass of infinicore.Tensor"
param3 = Parameter(torch.randn(3, 4), requires_grad=True)
print(f"✓ 创建 requires_grad=True 的 Parameter: {param3.requires_grad}")
assert param3.requires_grad, "requires_grad 应该为 True"
# 测试 3: 自动注册到 Module
class TestModule(Module):
def __init__(self):
super().__init__()
self.weight = Parameter(torch.randn(5, 10))
self.bias = Parameter(torch.randn(5))
self.weight = infinicore.nn.Parameter(
infinicore.empty(
(1, 2, 3),
dtype=infinicore.float32,
device=infinicore.device(device_str),
)
)
self.bias = infinicore.nn.Parameter(
infinicore.empty(
(1, 2, 3),
dtype=infinicore.float32,
device=infinicore.device(device_str),
)
)
test_module = TestModule()
param_count = sum(1 for _ in test_module.parameters())
@@ -129,8 +118,8 @@ print("✓ Parameters are accessible as attributes")
# 测试 5: state_dict
state_dict = test_module.state_dict()
print(f"✓ state_dict 键数量: {len(state_dict)}")
assert 'weight' in state_dict, "state_dict 应该包含 weight"
assert 'bias' in state_dict, "state_dict 应该包含 bias"
assert "weight" in state_dict, "state_dict 应该包含 weight"
assert "bias" in state_dict, "state_dict 应该包含 bias"
print(f"✓ state_dict 键: {list(state_dict.keys())}")
# Test 6: __repr__
@@ -139,46 +128,21 @@ print("✓ __repr__ method: output contains the class name")
assert "Parameter" in repr_str or "InfiniCoreParameter" in repr_str, "repr 应该包含类名"
print(repr_str[:100] + "...")
# Test 7: compatibility with torch.nn.Parameter
class MixedModule(Module):
def __init__(self):
super().__init__()
self.torch_param = nn.Parameter(torch.randn(3, 4))
self.infinicore_param = Parameter(torch.randn(3, 4))
mixed_module = MixedModule()
mixed_param_count = sum(1 for _ in mixed_module.parameters())
print(f"✓ 混合使用 torch.nn.Parameter 和 Parameter,参数数量: {mixed_param_count}")
assert mixed_param_count == 2, f"应该有 2 个参数,实际为 {mixed_param_count}"
# Test 8: forward pass
class TestModuleWithForward(Module):
def __init__(self):
super().__init__()
self.weight = Parameter(torch.randn(5, 10))
self.bias = Parameter(torch.randn(5))
def forward(self, x):
return x @ self.weight.t() + self.bias
test_module_forward = TestModuleWithForward()
test_input = torch.randn(2, 10)
with torch.no_grad():
output = test_module_forward.forward(test_input)
print(f"✓ 前向传播成功,输出形状: {output.shape}")
assert output.shape == (2, 5), f"输出形状应该是 (2, 5),实际为 {output.shape}"
# 测试 9: 从 None 创建
param_empty = Parameter(None)
print(f"✓ 从 None 创建 Parameter,形状: {param_empty.shape}")
assert param_empty.shape == torch.Size([0]), "从 None 创建应该是空张量"
# param_empty = Parameter(None)
# print(f"✓ Created Parameter from None, shape: {param_empty.shape}")
# assert param_empty.shape == torch.Size([0]), "Creating from None should give an empty tensor"
# Test 10: deepcopy
import copy
param_copy = copy.deepcopy(param1)
print(f"✓ 深拷贝 Parameter,形状: {param_copy.shape}")
assert param_copy.shape == param1.shape, "深拷贝后形状应该相同"
assert not torch.equal(param_copy, param1) or id(param_copy) != id(param1), "深拷贝应该是新对象"
# import copy
print("\n=== 所有测试通过! ===")
# param_copy = copy.deepcopy(param1)
# print(f"✓ Deep-copied Parameter, shape: {param_copy.shape}")
# assert param_copy.shape == param1.shape, "Shapes should match after deepcopy"
# assert not torch.equal(param_copy, param1) or id(param_copy) != id(param1), (
#     "A deepcopy should be a new object"
# )
print("\n=== 所有测试通过! ===")