Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
jerrrrry
infinicore
Commits
4a7839a8
"git@developer.sourcefind.cn:yangql/googletest.git" did not exist on "d655d0989db0fc66f4004bd9f6d78da4bdc045e9"
Commit
4a7839a8
authored
Nov 17, 2025
by
zhuyue
Committed by
zhuyue
Nov 18, 2025
Browse files
Convert Python list to InfiniCore Tensor
parent
74934cdf
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
477 additions
and
2 deletions
+477
-2
python/infinicore/__init__.py
python/infinicore/__init__.py
+4
-0
python/infinicore/tensor.py
python/infinicore/tensor.py
+147
-2
python/infinicore/utils.py
python/infinicore/utils.py
+45
-0
test/infinicore/test_from_list.py
test/infinicore/test_from_list.py
+281
-0
No files found.
python/infinicore/__init__.py
View file @
4a7839a8
...
@@ -38,6 +38,8 @@ from infinicore.tensor import (
...
@@ -38,6 +38,8 @@ from infinicore.tensor import (
empty
,
empty
,
empty_like
,
empty_like
,
from_blob
,
from_blob
,
from_list
,
from_numpy
,
from_torch
,
from_torch
,
ones
,
ones
,
strided_empty
,
strided_empty
,
...
@@ -85,6 +87,8 @@ __all__ = [
...
@@ -85,6 +87,8 @@ __all__ = [
"empty"
,
"empty"
,
"empty_like"
,
"empty_like"
,
"from_blob"
,
"from_blob"
,
"from_list"
,
"from_numpy"
,
"from_torch"
,
"from_torch"
,
"ones"
,
"ones"
,
"strided_empty"
,
"strided_empty"
,
...
...
python/infinicore/tensor.py
View file @
4a7839a8
import
ctypes
import
numpy
as
np
import
infinicore.device
import
infinicore.device
import
infinicore.dtype
import
infinicore.dtype
from
infinicore.lib
import
_infinicore
from
infinicore.lib
import
_infinicore
from
.utils
import
to_infinicore_dtype
from
.utils
import
(
infinicore_to_numpy_dtype
,
numpy_to_infinicore_dtype
,
to_infinicore_dtype
,
)
class
Tensor
:
class
Tensor
:
...
@@ -157,5 +165,142 @@ def from_torch(torch_tensor) -> Tensor:
...
@@ -157,5 +165,142 @@ def from_torch(torch_tensor) -> Tensor:
dtype
=
infini_type
.
_underlying
,
dtype
=
infini_type
.
_underlying
,
device
=
infini_device
.
_underlying
,
device
=
infini_device
.
_underlying
,
),
),
torch_ref
=
torch_tensor
,
_torch_ref
=
torch_tensor
,
)
def from_numpy(
    np_array,
    *,
    dtype: infinicore.dtype = None,
    device: infinicore.device = None,
) -> Tensor:
    """Convert a NumPy ndarray to an infinicore Tensor.

    Args:
        np_array: NumPy ndarray to convert to tensor
        dtype: Optional infinicore dtype. If None, inferred from numpy array
        device: Optional infinicore device. If None, defaults to CPU device

    Returns:
        Tensor: An infinicore tensor created from the numpy array

    Raises:
        TypeError: If input data is not a numpy ndarray
        ValueError: If input array is empty

    Note:
        NumPy arrays can only be created on CPU. For CUDA devices, data is first
        created on CPU, then copied to the target device.
    """
    # Input validation
    if not isinstance(np_array, np.ndarray):
        raise TypeError(f"Input data must be a np.ndarray, got {type(np_array).__name__}")
    if np_array.size == 0:
        raise ValueError("Input array cannot be empty")

    # Normalize the source buffer. from_blob below assumes a C-contiguous
    # layout, so the array must end up both in the requested dtype and in C
    # order. np.ascontiguousarray handles both at once and only copies when a
    # conversion is actually required. (The previous
    # np_array.astype(np_dtype, copy=True) call used astype's default
    # order='K', which preserves a Fortran/strided layout and would hand
    # from_blob a buffer whose memory order does not match its shape.)
    if dtype is not None:
        np_array = np.ascontiguousarray(np_array, dtype=infinicore_to_numpy_dtype(dtype))
    elif not np_array.flags.c_contiguous:
        np_array = np.ascontiguousarray(np_array)

    # Infer infinicore dtype if not provided
    infini_type = dtype if dtype is not None else numpy_to_infinicore_dtype(np_array.dtype)

    # Default to CPU device if not provided
    infini_device = device if device is not None else infinicore.device("cpu", 0)
    cpu_device = infinicore.device("cpu", 0)

    # Create a temporary tensor on CPU using from_blob to reference the numpy
    # buffer without copying. The local np_array reference keeps the buffer
    # alive until copy_ below completes.
    data_ptr = np_array.ctypes.data_as(ctypes.c_void_p).value
    temp_tensor = Tensor(
        _infinicore.from_blob(
            data_ptr,
            list(np_array.shape),
            dtype=infini_type._underlying,
            device=cpu_device._underlying,
        )
    )

    # Always create the result tensor on CPU first, then copy data, so the
    # result owns its storage independently of the numpy array.
    result = empty(list(np_array.shape), dtype=infini_type, device=cpu_device)
    result.copy_(temp_tensor)

    # If the target device is not CPU, move the tensor to the target device.
    # The temporary tensor and numpy array are then free to be collected.
    if infini_device.type != "cpu":
        result = result.to(infini_device)
    return result
def from_list(data, *, dtype=None, device=None) -> Tensor:
    """Convert a Python list to an infinicore Tensor.

    Args:
        data: Python list or nested list to convert to tensor
        dtype: Optional infinicore dtype. If None, inferred from numpy array
        device: Optional infinicore device. If None, defaults to CPU device

    Returns:
        Tensor: An infinicore tensor created from the list data

    Raises:
        TypeError: If input data is not a list or tuple
        ValueError: If input data is empty

    Note:
        The list is materialized as a numpy array (numpy arrays live on CPU)
        and handed to from_numpy, which performs any device transfer.
    """
    # Input validation
    if not isinstance(data, (list, tuple)):
        raise TypeError(f"Input data must be a list or tuple, got {type(data).__name__}")
    if not data:
        raise ValueError("Input data cannot be empty")

    # Build the numpy conversion arguments in one place: always copy into a
    # C-contiguous buffer, and pin the numpy dtype up front when the caller
    # requested a specific infinicore dtype (otherwise numpy infers it).
    array_kwargs = {"copy": True, "order": "C"}
    if dtype is not None:
        array_kwargs["dtype"] = infinicore_to_numpy_dtype(dtype)
    np_array = np.array(data, **array_kwargs)

    # Delegate tensor creation (and device placement) to from_numpy so both
    # entry points share one code path.
    return from_numpy(np_array, dtype=dtype, device=device)
python/infinicore/utils.py
View file @
4a7839a8
import
numpy
as
np
import
torch
import
torch
import
infinicore
import
infinicore
...
@@ -45,3 +46,47 @@ def to_infinicore_dtype(torch_dtype):
...
@@ -45,3 +46,47 @@ def to_infinicore_dtype(torch_dtype):
return
infinicore
.
uint8
return
infinicore
.
uint8
else
:
else
:
raise
ValueError
(
f
"Unsupported torch dtype:
{
torch_dtype
}
"
)
raise
ValueError
(
f
"Unsupported torch dtype:
{
torch_dtype
}
"
)
def numpy_to_infinicore_dtype(numpy_dtype):
    """Convert numpy data type to infinicore data type"""
    # Ordered (numpy, infinicore) pairs; matching relies on numpy's dtype
    # equality semantics, exactly like an if/elif chain over == checks.
    conversions = (
        (np.float32, infinicore.float32),
        (np.float64, infinicore.float64),
        (np.float16, infinicore.float16),
        (np.int8, infinicore.int8),
        (np.int16, infinicore.int16),
        (np.int32, infinicore.int32),
        (np.int64, infinicore.int64),
        (np.uint8, infinicore.uint8),
    )
    for np_candidate, infini_dtype in conversions:
        if numpy_dtype == np_candidate:
            return infini_dtype
    raise ValueError(f"Unsupported numpy dtype: {numpy_dtype}")
def infinicore_to_numpy_dtype(infini_dtype):
    """Convert infinicore data type to numpy data type"""
    # Ordered (infinicore, numpy) pairs — the reverse of
    # numpy_to_infinicore_dtype, matched with the same == semantics.
    conversions = (
        (infinicore.float32, np.float32),
        (infinicore.float64, np.float64),
        (infinicore.float16, np.float16),
        (infinicore.int8, np.int8),
        (infinicore.int16, np.int16),
        (infinicore.int32, np.int32),
        (infinicore.int64, np.int64),
        (infinicore.uint8, np.uint8),
    )
    for infini_candidate, np_dtype in conversions:
        if infini_dtype == infini_candidate:
            return np_dtype
    raise ValueError(f"Unsupported infinicore dtype: {infini_dtype}")
test/infinicore/test_from_list.py
0 → 100644
View file @
4a7839a8
import
torch
import
infinicore
def _copy_infinicore_to_torch(infinicore_tensor, torch_result_tensor):
    """Copy the contents of an infinicore tensor into a torch tensor.

    Args:
        infinicore_tensor: Source infinicore tensor
        torch_result_tensor: Target torch tensor (receives the data)

    Returns:
        torch.Tensor: The target torch tensor, now holding the copied data
    """
    # Mirror the torch tensor's device as an infinicore device.
    dst_device = torch_result_tensor.device
    if dst_device.type == "cuda":
        target = infinicore.device("cuda", dst_device.index or 0)
    else:
        target = infinicore.device("cpu", 0)

    # Wrap the torch tensor's storage so infinicore can write into it directly.
    dst_view = infinicore.from_blob(
        torch_result_tensor.data_ptr(),
        list(torch_result_tensor.shape),
        dtype=infinicore_tensor.dtype,
        device=target,
    )

    # Move the source onto the destination device when it lives elsewhere
    # (different device type, or a different CUDA device index).
    src = infinicore_tensor
    device_mismatch = src.device.type != target.type or (
        target.type == "cuda" and src.device.index != target.index
    )
    if device_mismatch:
        src = src.to(target)

    dst_view.copy_(src)
    return torch_result_tensor
def compare_with_torch(infinicore_tensor, expected_list, dtype=torch.float32, atol=1e-6):
    """Helper function: Compare infinicore tensor computation results with PyTorch expected results

    Uses unified addition verification: tensor + zero_tensor == tensor
    Converts all data to float32 for verification to avoid different verification paths for different data types

    Args:
        infinicore_tensor: infinicore tensor object
        expected_list: Expected data (Python list, can be nested)
        dtype: torch dtype (for compatibility, actually uses float32 for verification)
        atol: Absolute tolerance for floating point comparison

    Returns:
        bool: Whether data matches
    """
    # Verify basic attributes: torch infers the shape from the nested list.
    expected_shape = list(torch.tensor(expected_list).shape)
    assert list(infinicore_tensor.shape) == expected_shape, \
        f"Shape mismatch: expected {expected_shape}, got {list(infinicore_tensor.shape)}"

    # Flatten nested list and convert to float
    def flatten(data):
        result = []
        for item in data:
            if isinstance(item, (list, tuple)):
                result.extend(flatten(item))
            else:
                result.append(float(item))
        return result

    flat_expected = flatten(expected_list)

    # Unified verification through addition: create float32 version of tensor for verification
    # This unifies verification logic and avoids different verification paths for different data types
    # NOTE(review): beyond the shape assert above, this rebuilds a tensor from
    # expected_list rather than reading infinicore_tensor's data, so it
    # exercises the from_list/addition/copy path, not the passed tensor's
    # contents — confirm this is intended.
    tensor_f32 = infinicore.from_list(flat_expected, dtype=infinicore.float32)
    expected_f32 = torch.tensor(flat_expected, dtype=torch.float32)

    # Add with zero tensor to verify data: tensor + zero == tensor
    zero_f32 = infinicore.from_list([0.0] * tensor_f32.numel(), dtype=infinicore.float32)
    result = tensor_f32 + zero_f32

    # Verify result by copying back into a torch tensor and comparing.
    torch_result = _copy_infinicore_to_torch(result, torch.zeros_like(expected_f32))
    return torch.allclose(expected_f32, torch_result, atol=atol)
# Parameterized test data: test cases for different dimensions
# Each entry is (input nested list, expected tensor shape, human-readable label).
_TEST_SHAPES = [
    ([1, 2, 3, 4, 5], [5], "1D"),
    ([[1, 2, 3], [4, 5, 6]], [2, 3], "2D"),
    ([[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [2, 2, 2], "3D"),
]
def test_from_list_basic_shapes():
    """Test converting lists of different dimensions to tensor (parameterized test)"""
    for payload, want_shape, label in _TEST_SHAPES:
        print(f"Testing {label} list to tensor conversion")

        # Build a float32 tensor so the addition-based check can be reused.
        made = infinicore.from_list(payload, dtype=infinicore.float32)

        # Shape and dtype checks.
        got_shape = list(made.shape)
        assert got_shape == want_shape, (
            f"{label}: Shape mismatch: expected {want_shape}, got {got_shape}"
        )
        assert made.dtype == infinicore.float32, (
            f"{label}: Expected float32, got {made.dtype}"
        )

        # Data check via the shared torch comparison helper.
        assert compare_with_torch(made, payload, dtype=torch.float32), (
            f"{label}: Data mismatch"
        )
        print(f"✓ {label} list test passed")
def test_from_list_float():
    """Test converting float list to tensor"""
    print("=" * 50)
    print("Testing float list to tensor conversion")

    values = [[1.0, 2.5, 3.7], [4.2, 5.9, 6.1]]
    made = infinicore.from_list(values)

    # Python floats are double precision, so the inferred dtype is float64.
    assert made.dtype == infinicore.float64, f"Expected float64, got {made.dtype}"

    # Data check via the shared torch comparison helper.
    assert compare_with_torch(made, values, dtype=torch.float64), "Data mismatch"
    print("✓ Float list test passed")
def test_from_list_with_dtype():
    """Test converting list to tensor with specified dtype"""
    print("=" * 50)
    print("Testing list to tensor conversion with specified dtype")

    values = [1, 2, 3, 4, 5]

    # Request float32 explicitly even though the inputs are ints.
    made = infinicore.from_list(values, dtype=infinicore.float32)
    assert made.dtype == infinicore.float32, f"Expected float32, got {made.dtype}"

    # Data check via the shared torch comparison helper.
    assert compare_with_torch(made, values, dtype=torch.float32), "Data mismatch"
    print("✓ Specified dtype test passed")
def test_from_list_with_device():
    """Test converting list to tensor with specified device"""
    print("=" * 50)
    print("Testing list to tensor conversion with specified device")
    data = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]

    # Test CPU device
    tensor_cpu = infinicore.from_list(
        data, dtype=infinicore.float32, device=infinicore.device("cpu", 0)
    )
    assert tensor_cpu.device.type == "cpu", "Expected CPU device"
    # Verify data correctness on CPU
    assert compare_with_torch(tensor_cpu, data, dtype=torch.float32), "CPU data mismatch"

    # Test CUDA device (if available)
    try:
        # Check if CUDA is available in PyTorch
        if not torch.cuda.is_available():
            print("⚠ CUDA not available in PyTorch, skipping CUDA test")
        else:
            tensor_cuda = infinicore.from_list(
                data, dtype=infinicore.float32, device=infinicore.device("cuda", 0)
            )
            assert tensor_cuda.device.type == "cuda", "Expected CUDA device"
            # Create PyTorch CUDA tensor for comparison
            torch_expected_cuda = torch.tensor(data, dtype=torch.float32, device="cuda:0")
            torch_result_cuda = torch.zeros_like(torch_expected_cuda)
            # Copy infinicore CUDA tensor to PyTorch CUDA tensor and compare
            _copy_infinicore_to_torch(tensor_cuda, torch_result_cuda)
            assert torch.allclose(torch_expected_cuda, torch_result_cuda), "CUDA data mismatch"
            print("✓ CUDA device test passed (device type and data correctness verified on CUDA)")
    except AssertionError:
        # A failed check on the CUDA path is a real test failure, not an
        # unavailable device — re-raise instead of swallowing it. (Previously
        # the broad Exception handler below reported assertion failures as
        # "CUDA device not available".)
        raise
    except Exception as e:
        print(f"⚠ CUDA device not available, skipping CUDA test: {e}")
    print("✓ Specified device test passed")
def test_from_list_operations():
    """Test operations on tensors created from lists"""
    print("=" * 50)
    print("Testing operations on tensors created from lists")

    lhs_values = [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]
    rhs_values = [[7.0, 8.0, 9.0], [10.0, 11.0, 12.0]]

    lhs = infinicore.from_list(lhs_values, dtype=infinicore.float32)
    rhs = infinicore.from_list(rhs_values, dtype=infinicore.float32)

    # Exercise elementwise addition and multiplication on infinicore tensors.
    sum_tensor = lhs + rhs
    prod_tensor = lhs * rhs

    # Reference results computed with torch.
    ref_lhs = torch.tensor(lhs_values, dtype=torch.float32)
    ref_rhs = torch.tensor(rhs_values, dtype=torch.float32)
    ref_sum = ref_lhs + ref_rhs
    ref_prod = ref_lhs * ref_rhs

    # Pull results back into torch tensors and compare.
    got_sum = _copy_infinicore_to_torch(sum_tensor, torch.zeros_like(ref_sum))
    assert torch.allclose(ref_sum, got_sum), "Addition result mismatch"
    got_prod = _copy_infinicore_to_torch(prod_tensor, torch.zeros_like(ref_prod))
    assert torch.allclose(ref_prod, got_prod), "Multiplication result mismatch"
    print("✓ Operations test passed")
def test_from_list_single_element():
    """Test converting single element list to tensor"""
    print("=" * 50)
    print("Testing single element list to tensor conversion")

    values = [42]
    made = infinicore.from_list(values, dtype=infinicore.float32)

    assert list(made.shape) == [1], f"Expected shape [1], got {made.shape}"
    assert made.dtype == infinicore.float32, f"Expected float32, got {made.dtype}"

    # Data check via the shared torch comparison helper.
    assert compare_with_torch(made, values, dtype=torch.float32), "Data mismatch"
    print("✓ Single element test passed")
def test_from_list_edge_cases():
    """Test edge cases"""
    print("=" * 50)
    print("Testing edge cases")

    # An empty list must be rejected with ValueError.
    try:
        infinicore.from_list([])
        assert False, "Expected ValueError for empty list"
    except ValueError:
        pass  # Expected exception

    # A non-list/tuple input must be rejected with TypeError.
    try:
        infinicore.from_list("not a list")
        assert False, "Expected TypeError for non-list input"
    except TypeError:
        pass  # Expected exception

    # A scalar wrapped in a list yields a one-element tensor.
    scalar = 42
    made = infinicore.from_list([scalar], dtype=infinicore.float32)
    assert list(made.shape) == [1], f"Expected shape [1], got {made.shape}"
    assert compare_with_torch(made, [scalar], dtype=torch.float32), "Data mismatch"
    print("✓ Edge cases test passed")
# Allow running this file directly as a simple test driver outside pytest.
if __name__ == "__main__":
    print("\nStarting from_list functionality tests...\n")
    try:
        # Run every test in sequence; the first failure aborts the run.
        test_from_list_basic_shapes()
        test_from_list_float()
        test_from_list_with_dtype()
        test_from_list_with_device()
        test_from_list_operations()
        test_from_list_single_element()
        test_from_list_edge_cases()
        print("\n" + "=" * 50)
        print("✅ All tests passed!")
        print("=" * 50)
    except Exception as e:
        # Report the first failure with its full traceback instead of
        # letting the interpreter crash with an unhandled exception.
        print(f"\n❌ Test failed: {e}")
        import traceback

        traceback.print_exc()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment