Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
jerrrrry
infinicore
Commits
0b2ea12d
Commit
0b2ea12d
authored
Nov 07, 2025
by
zhuyue
Committed by
zhuyue
Nov 17, 2025
Browse files
Issue/568: feat: add infinicore.nn.InfiniCoreParameter referencing torch.nn.Parameter and tests.
parent
cad51297
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
330 additions
and
8 deletions
+330
-8
python/infinicore/nn/modules/__init__.py
python/infinicore/nn/modules/__init__.py
+1
-0
python/infinicore/nn/modules/module.py
python/infinicore/nn/modules/module.py
+12
-8
python/infinicore/nn/modules/parameter.py
python/infinicore/nn/modules/parameter.py
+133
-0
test/infinicore/infinicore_parameter_test.py
test/infinicore/infinicore_parameter_test.py
+184
-0
No files found.
python/infinicore/nn/modules/__init__.py
View file @
0b2ea12d
from
.module
import
InfiniCoreModule
as
Module
from
.module
import
InfiniCoreModule
as
Module
from
.module_list
import
InfiniCoreModuleList
as
ModuleList
from
.module_list
import
InfiniCoreModuleList
as
ModuleList
from
.parameter
import
InfiniCoreParameter
as
Parameter
python/infinicore/nn/modules/module.py
View file @
0b2ea12d
...
@@ -46,7 +46,7 @@ class InfiniCoreModule:
...
@@ -46,7 +46,7 @@ class InfiniCoreModule:
_version
:
int
=
1
_version
:
int
=
1
training
:
bool
training
:
bool
_parameters
:
Dict
[
str
,
Optional
[
torch
.
nn
.
Parameter
]]
_parameters
:
Dict
[
str
,
Optional
[
Union
[
torch
.
nn
.
Parameter
,
'InfiniCoreParameter'
]
]]
_buffers
:
Dict
[
str
,
Optional
[
torch
.
Tensor
]]
_buffers
:
Dict
[
str
,
Optional
[
torch
.
Tensor
]]
_non_persistent_buffers_set
:
Set
[
str
]
_non_persistent_buffers_set
:
Set
[
str
]
_modules
:
Dict
[
str
,
Optional
[
'InfiniCoreModule'
]]
_modules
:
Dict
[
str
,
Optional
[
'InfiniCoreModule'
]]
...
@@ -84,7 +84,9 @@ class InfiniCoreModule:
...
@@ -84,7 +84,9 @@ class InfiniCoreModule:
d
.
discard
(
name
)
d
.
discard
(
name
)
params
=
self
.
__dict__
.
get
(
"_parameters"
)
params
=
self
.
__dict__
.
get
(
"_parameters"
)
if
isinstance
(
value
,
torch
.
nn
.
Parameter
):
# Support both torch.nn.Parameter and InfiniCoreParameter
from
.parameter
import
InfiniCoreParameter
if
isinstance
(
value
,
(
torch
.
nn
.
Parameter
,
InfiniCoreParameter
)):
if
params
is
None
:
if
params
is
None
:
raise
AttributeError
(
raise
AttributeError
(
"cannot assign parameters before Module.__init__() call"
"cannot assign parameters before Module.__init__() call"
...
@@ -100,7 +102,7 @@ class InfiniCoreModule:
...
@@ -100,7 +102,7 @@ class InfiniCoreModule:
if
value
is
not
None
:
if
value
is
not
None
:
raise
TypeError
(
raise
TypeError
(
f
"cannot assign '
{
torch
.
typename
(
value
)
}
' as parameter '
{
name
}
' "
f
"cannot assign '
{
torch
.
typename
(
value
)
}
' as parameter '
{
name
}
' "
"(torch.nn.Parameter or None expected)"
"(torch.nn.Parameter
, InfiniCoreParameter
or None expected)"
)
)
self
.
register_parameter
(
name
,
value
)
self
.
register_parameter
(
name
,
value
)
else
:
else
:
...
@@ -239,12 +241,14 @@ class InfiniCoreModule:
...
@@ -239,12 +241,14 @@ class InfiniCoreModule:
if
param
is
None
:
if
param
is
None
:
self
.
_parameters
[
name
]
=
None
self
.
_parameters
[
name
]
=
None
elif
not
isinstance
(
param
,
torch
.
nn
.
Parameter
):
raise
TypeError
(
f
"cannot assign '
{
torch
.
typename
(
param
)
}
' object to parameter '
{
name
}
' "
"(torch.nn.Parameter or None required)"
)
else
:
else
:
# Support both torch.nn.Parameter and InfiniCoreParameter
from
.parameter
import
InfiniCoreParameter
if
not
isinstance
(
param
,
(
torch
.
nn
.
Parameter
,
InfiniCoreParameter
)):
raise
TypeError
(
f
"cannot assign '
{
torch
.
typename
(
param
)
}
' object to parameter '
{
name
}
' "
"(torch.nn.Parameter, InfiniCoreParameter or None required)"
)
self
.
_parameters
[
name
]
=
param
self
.
_parameters
[
name
]
=
param
def
get_extra_state
(
self
)
->
Any
:
def
get_extra_state
(
self
)
->
Any
:
...
...
python/infinicore/nn/modules/parameter.py
0 → 100644
View file @
0b2ea12d
# Copyright (c) 2025, InfiniCore
#
# This file contains modified code derived from PyTorch's `torch.nn.Parameter`
# implementation, which is licensed under the BSD 3-Clause License.
#
# The modifications include adaptations for the InfiniCore framework.
#
# Original PyTorch source:
# https://github.com/pytorch/pytorch/blob/main/torch/nn/parameter.py
#
# Referencing PyTorch v2.4.0
#
# The use of this file is governed by the BSD 3-Clause License.
import
torch
from
typing
import
Optional
from
collections
import
OrderedDict
class InfiniCoreParameter(torch.Tensor):
    r"""A kind of Tensor that is to be considered a module parameter.

    Parameters are :class:`~torch.Tensor` subclasses, that have a
    very special property when used with :class:`InfiniCoreModule` s - when they're
    assigned as Module attributes they are automatically added to the list of
    its parameters, and will appear e.g. in :meth:`~InfiniCoreModule.parameters` iterator.
    Assigning a Tensor doesn't have such effect. This is because one might
    want to cache some temporary state, like last hidden state of the RNN, in
    the model. If there was no such class as :class:`InfiniCoreParameter`, these
    temporaries would get registered too.

    Args:
        data (Tensor, optional): parameter tensor. If None, creates an empty tensor.
        requires_grad (bool, optional): if the parameter requires gradient. Note that
            the torch.no_grad() context does NOT affect the default behavior of
            Parameter creation--the Parameter will still have `requires_grad=True` in
            :class:`~no_grad` mode. See :ref:`locally-disable-grad-doc` for more
            details. Default: `True`

    Example::

        >>> import torch
        >>> from infinicore.nn.modules import InfiniCoreModule, InfiniCoreParameter
        >>>
        >>> class MyModule(InfiniCoreModule):
        ...     def __init__(self):
        ...         super().__init__()
        ...         self.weight = InfiniCoreParameter(torch.randn(10, 5))
        ...         self.bias = InfiniCoreParameter(torch.randn(5))
        ...
        >>> module = MyModule()
        >>> for param in module.parameters():
        ...     print(param.shape)
        torch.Size([10, 5])
        torch.Size([5])
    """

    def __new__(cls, data: Optional[torch.Tensor] = None, requires_grad: bool = True):
        # No data given: fall back to an empty 1-D tensor of length 0,
        # matching torch.nn.Parameter's default behavior.
        if data is None:
            data = torch.empty(0)
        # Handle standard torch.Tensor or InfiniCoreParameter
        if type(data) is torch.Tensor or type(data) is InfiniCoreParameter:
            # For ease of BC maintenance, keep this path for standard Tensor.
            # Eventually (tm), we should change the behavior for standard Tensor to match.
            # _make_subclass shares the original storage; no data copy happens here.
            return torch.Tensor._make_subclass(cls, data, requires_grad)
        # Path for custom tensors: set a flag on the instance to indicate parameter-ness.
        # NOTE: on this path the returned object keeps the custom tensor's own type
        # (it is NOT an InfiniCoreParameter instance); `_is_param` marks it instead.
        t = data.detach().requires_grad_(requires_grad)
        if type(t) is not type(data):
            raise RuntimeError(
                f"Creating a InfiniCoreParameter from an instance of type {type(data).__name__} "
                "requires that detach() returns an instance of the same type, but return "
                f"type {type(t).__name__} was found instead. To use the type as a "
                "InfiniCoreParameter, please correct the detach() semantics defined by "
                "its __torch_dispatch__() implementation."
            )
        t._is_param = True
        return t

    # Note: the 3 methods below only apply to standard Tensor. Parameters of custom tensor types
    # are still considered that custom tensor type and these methods will not be called for them.

    def __deepcopy__(self, memo):
        """Deep-copy the parameter, honoring the `memo` cycle-detection dict."""
        if id(self) in memo:
            return memo[id(self)]
        else:
            # Clone the underlying data (preserving memory format) into a new
            # instance of the same class, keeping the requires_grad flag.
            result = type(self)(
                self.data.clone(memory_format=torch.preserve_format), self.requires_grad
            )
            memo[id(self)] = result
            return result

    def __repr__(self):
        """Prefix the tensor repr with the class name for easy identification."""
        return "InfiniCoreParameter containing:\n" + super().__repr__()

    def __reduce_ex__(self, proto):
        """Pickle support: rebuild via module-level helpers.

        Simplified version for serialization.
        In a full implementation, you might want to handle hooks and state.
        """
        # Optional extra state stored on the instance; absent for most parameters.
        state = getattr(self, '_state', None)
        # Hooks are always serialized as an empty mapping in this simplified version.
        hooks = OrderedDict()
        if not state:
            return (
                _rebuild_parameter,
                (self.data, self.requires_grad, hooks),
            )
        return (
            _rebuild_parameter_with_state,
            (self.data, self.requires_grad, hooks, state),
        )

    # Note: __torch_function__ is handled by the Tensor base class
    # We don't need to override it for standard Parameter behavior
def _rebuild_parameter(data, requires_grad, hooks):
    """Rebuild a parameter from serialized data.

    ``hooks`` is accepted for pickle-format compatibility but is not
    restored in this simplified implementation.
    """
    return InfiniCoreParameter(data, requires_grad)
def _rebuild_parameter_with_state(data, requires_grad, hooks, state):
    """Rebuild a parameter with extra state from serialized data.

    The extra ``state`` mapping is reattached as ``_state``; ``hooks`` is
    accepted for pickle-format compatibility but not restored here.
    """
    rebuilt = InfiniCoreParameter(data, requires_grad)
    rebuilt._state = state
    return rebuilt
test/infinicore/infinicore_parameter_test.py
0 → 100644
View file @
0b2ea12d
import safetensors.torch
import torch
import torch.nn as nn
import safetensors
# ============================================================
# 0. Import the infinicore package and configure a temporary
#    safetensors storage path used by this test script.
# ============================================================
import sys
import os
# Make the in-repo infinicore package importable as top-level `nn.modules`.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../python/infinicore')))
# Scratch directory for the serialized model; created if missing.
save_dir = os.path.join(os.path.dirname(__file__), '../../tmp')
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, "infinicore_parameter_test.safetensors")
# ============================================================
# 1. Define and save a model with PyTorch (using torch.nn.Parameter)
# ============================================================
class TorchParameterNet(nn.Module):
    """Reference network built with ``torch.nn.Parameter``.

    Holds a weight matrix, a bias vector, a learnable scalar scale, and a
    non-learnable ``offset`` buffer; serves as the baseline the InfiniCore
    variant is compared against.
    """

    def __init__(self, in_features=10, out_features=5):
        super().__init__()
        # Learnable parameters — auto-registered by nn.Module.
        self.weight = nn.Parameter(torch.randn(out_features, in_features))
        self.bias = nn.Parameter(torch.randn(out_features))
        self.scale = nn.Parameter(torch.ones(1) * 0.5)
        # Persistent buffer: saved in state_dict but excluded from parameters().
        self.register_buffer("offset", torch.tensor(0.1))

    def forward(self, x):
        # Affine projection, then scale and shift by the buffer.
        projected = x @ self.weight.t() + self.bias
        return projected * self.scale + self.offset
# ===== Save the Torch model =====
torch_model = TorchParameterNet()
torch_state_dict = torch_model.state_dict()
# Serialize parameters + buffers to the safetensors file created above.
safetensors.torch.save_file(torch_state_dict, save_path)
print("✓ PyTorch 模型已保存")
# ============================================================
# 2. Load with plain torch and run inference
# ============================================================
torch_model_infer = TorchParameterNet()
torch_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
torch_model_infer.eval()
# NOTE(review): `input` shadows the builtin; it is reused by later sections,
# so it cannot be renamed here in isolation.
input = torch.randn(2, 10)
torch_model_out = torch_model_infer(input)
print("✓ Torch 输出:", torch_model_out.detach().numpy().mean())
# ============================================================
# 3. 使用 Parameter 加载并推理
# ============================================================
from
nn.modules
import
Module
,
Parameter
class InfiniCoreParameterNet(Module):
    """Same architecture as ``TorchParameterNet``, but every learnable
    tensor is an InfiniCore ``Parameter`` instead of ``torch.nn.Parameter``.
    """

    def __init__(self, in_features=10, out_features=5):
        super().__init__()
        # InfiniCore Parameter used in place of torch.nn.Parameter.
        self.weight = Parameter(torch.randn(out_features, in_features))
        self.bias = Parameter(torch.randn(out_features))
        self.scale = Parameter(torch.ones(1) * 0.5)
        # Buffer registration mirrors the torch reference model.
        self.register_buffer("offset", torch.tensor(0.1))

    def forward(self, x):
        affine = x @ self.weight.t() + self.bias
        return affine * self.scale + self.offset
# ===== Load the safetensors file with InfiniCoreParameterNet and run inference =====
infinicore_model_infer = InfiniCoreParameterNet()
infinicore_model_infer.load_state_dict(safetensors.torch.load_file(save_path))
infinicore_model_infer.eval()
infinicore_model_out = infinicore_model_infer.forward(input)
print("✓ InfiniCore 输出:", infinicore_model_out.detach().numpy().mean())
# ============================================================
# 4. Compare results
# ============================================================
# Maximum element-wise absolute difference between the two model outputs.
diff = (infinicore_model_out - torch_model_out).abs().max().item()
print(f"✓ Parameter 与 Torch 最大误差: {diff:.8f}")
if diff < 1e-9:
    print("✓ Parameter 与 Torch 精度一致.")
else:
    print("✗ Parameter 与 Torch 精度存在差异.")
# ============================================================
# 5. Test basic Parameter functionality
# ============================================================
print("\n=== 测试 Parameter 基本功能 ===")
# Test 1: create a Parameter
param1 = Parameter(torch.randn(5, 10))
print(f"✓ 创建 Parameter,形状: {param1.shape}")
# Check the type (Parameter may be an alias of InfiniCoreParameter).
from nn.modules.parameter import InfiniCoreParameter
assert isinstance(param1, (Parameter, InfiniCoreParameter)), "应该是 Parameter 类型"
assert isinstance(param1, torch.Tensor), "应该是 torch.Tensor 的子类"
# Test 2: requires_grad is honored in both directions
param2 = Parameter(torch.randn(3, 4), requires_grad=False)
print(f"✓ 创建 requires_grad=False 的 Parameter: {param2.requires_grad}")
assert not param2.requires_grad, "requires_grad 应该为 False"
param3 = Parameter(torch.randn(3, 4), requires_grad=True)
print(f"✓ 创建 requires_grad=True 的 Parameter: {param3.requires_grad}")
assert param3.requires_grad, "requires_grad 应该为 True"
# Test 3: automatic registration on a Module
class TestModule(Module):
    """Minimal module used to verify that InfiniCore Parameters assigned as
    attributes are registered automatically."""

    def __init__(self):
        super().__init__()
        w = torch.randn(5, 10)
        b = torch.randn(5)
        # Both assignments should land in the module's parameter registry.
        self.weight = Parameter(w)
        self.bias = Parameter(b)
test_module = TestModule()
# Count parameters yielded by the module's parameters() iterator.
param_count = sum(1 for _ in test_module.parameters())
print(f"✓ 自动注册到 Module,参数数量: {param_count}")
assert param_count == 2, f"应该有 2 个参数,实际为 {param_count}"
# Test 4: attribute access
assert test_module.weight is not None, "weight 应该可以访问"
assert test_module.bias is not None, "bias 应该可以访问"
print("✓ 参数可以通过属性访问")
# Test 5: state_dict contains the registered parameters
state_dict = test_module.state_dict()
print(f"✓ state_dict 键数量: {len(state_dict)}")
assert 'weight' in state_dict, "state_dict 应该包含 weight"
assert 'bias' in state_dict, "state_dict 应该包含 bias"
print(f"✓ state_dict 键: {list(state_dict.keys())}")
# Test 6: __repr__ mentions the class name
repr_str = repr(param1)
print(f"✓ __repr__ 方法: 输出包含类名")
assert "Parameter" in repr_str or "InfiniCoreParameter" in repr_str, "repr 应该包含类名"
print(repr_str[:100] + "...")
# Test 7: compatibility with torch.nn.Parameter
class MixedModule(Module):
    """Module mixing a ``torch.nn.Parameter`` with an InfiniCore ``Parameter``
    to confirm both kinds are registered side by side."""

    def __init__(self):
        super().__init__()
        shape = (3, 4)
        self.torch_param = nn.Parameter(torch.randn(*shape))
        self.infinicore_param = Parameter(torch.randn(*shape))
mixed_module = MixedModule()
# Both the torch and InfiniCore parameters should be counted.
mixed_param_count = sum(1 for _ in mixed_module.parameters())
print(f"✓ 混合使用 torch.nn.Parameter 和 Parameter,参数数量: {mixed_param_count}")
assert mixed_param_count == 2, f"应该有 2 个参数,实际为 {mixed_param_count}"
# Test 8: forward pass
class TestModuleWithForward(Module):
    """Linear layer built from InfiniCore Parameters, used to check that a
    forward pass works end to end."""

    def __init__(self):
        super().__init__()
        self.weight = Parameter(torch.randn(5, 10))
        self.bias = Parameter(torch.randn(5))

    def forward(self, x):
        # y = x W^T + b
        return x @ self.weight.t() + self.bias
test_module_forward = TestModuleWithForward()
test_input = torch.randn(2, 10)
# Inference only: no autograd graph needed.
with torch.no_grad():
    output = test_module_forward.forward(test_input)
print(f"✓ 前向传播成功,输出形状: {output.shape}")
assert output.shape == (2, 5), f"输出形状应该是 (2, 5),实际为 {output.shape}"
# Test 9: constructing from None yields an empty tensor
param_empty = Parameter(None)
print(f"✓ 从 None 创建 Parameter,形状: {param_empty.shape}")
assert param_empty.shape == torch.Size([0]), "从 None 创建应该是空张量"
# Test 10: deepcopy produces a distinct object with the same shape
import copy
param_copy = copy.deepcopy(param1)
print(f"✓ 深拷贝 Parameter,形状: {param_copy.shape}")
assert param_copy.shape == param1.shape, "深拷贝后形状应该相同"
assert not torch.equal(param_copy, param1) or id(param_copy) != id(param1), "深拷贝应该是新对象"
print("\n=== 所有测试通过! ===")
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment