OpenDAS / torch-sparse · Commits

Commit 1bf12762 (unverified), authored Nov 25, 2022 by Matthias Fey, committed by GitHub on Nov 25, 2022.

Set version (#295)

* set version
* update
* update
* update
* fix test

Parent: 00ccae84
Showing 4 changed files with 30 additions and 8 deletions (+30 -8):

.github/workflows/building.yml   +5  -2
test/test_matmul.py              +15 -3
test/test_spspmm.py              +9  -2
torch_sparse/cat.py              +1  -1
.github/workflows/building.yml (view file @ 1bf12762)

@@ -45,7 +45,6 @@ jobs:
       - name: Upgrade pip
         run: |
           pip install --upgrade setuptools
-          pip list
       - name: Free up disk space
         if: ${{ runner.os == 'Linux' }}
@@ -67,7 +66,11 @@ jobs:
         if: ${{ runner.os != 'macOS' }}
         run: |
           VERSION=`sed -n "s/^__version__ = '\(.*\)'/\1/p" torch_sparse/__init__.py`
-          sed -i "s/$VERSION/$VERSION+${{ matrix.cuda-version }}/" torch_sparse/__init__.py
+          TORCH_VERSION=`echo "pt${{ matrix.torch-version }}" | sed "s/..$//" | sed "s/\.//g"`
+          CUDA_VERSION=`echo ${{ matrix.cuda-version }}`
+          echo "New version name: $VERSION+$TORCH_VERSION$CUDA_VERSION"
+          sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" setup.py
+          sed -i "s/$VERSION/$VERSION+$TORCH_VERSION$CUDA_VERSION/" torch_sparse/__init__.py
         shell: bash
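For reference, the added workflow lines construct a local version label of the form <base>+pt<torch><cuda> and patch it into both setup.py and torch_sparse/__init__.py. Below is a minimal Python sketch of just that string transformation; the concrete inputs ('0.6.16', '1.13.0', 'cu117') are hypothetical CI matrix values, not read from this commit.

def build_local_version(base: str, torch_version: str, cuda_version: str) -> str:
    # Mirrors the sed pipeline above: "pt1.13.0" -> drop the trailing two
    # characters (sed "s/..$//") -> "pt1.13" -> drop the remaining dots
    # (sed "s/\.//g") -> "pt113".
    torch_tag = ('pt' + torch_version)[:-2].replace('.', '')
    return f'{base}+{torch_tag}{cuda_version}'

# Hypothetical example values, producing the label the echo line would print.
assert build_local_version('0.6.16', '1.13.0', 'cu117') == '0.6.16+pt113cu117'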
test/test_matmul.py (view file @ 1bf12762)

@@ -3,6 +3,7 @@ from itertools import product
 import pytest
 import torch
 import torch_scatter
+
 from torch_sparse.matmul import matmul
 from torch_sparse.tensor import SparseTensor
@@ -12,6 +13,9 @@ from .utils import devices, grad_dtypes, reductions
 @pytest.mark.parametrize('dtype,device,reduce',
                          product(grad_dtypes, devices, reductions))
 def test_spmm(dtype, device, reduce):
+    if device == torch.device('cuda:0') and dtype == torch.bfloat16:
+        return  # Not yet implemented.
+
     src = torch.randn((10, 8), dtype=dtype, device=device)
     src[2:4, :] = 0  # Remove multiple rows.
     src[:, 2:4] = 0  # Remove multiple columns.
@@ -39,13 +43,21 @@ def test_spmm(dtype, device, reduce):
     out = matmul(src, other, reduce)
     out.backward(grad_out)

-    assert torch.allclose(expected, out, atol=1e-2)
-    assert torch.allclose(expected_grad_value, value.grad, atol=1e-2)
-    assert torch.allclose(expected_grad_other, other.grad, atol=1e-2)
+    if dtype == torch.float16 or dtype == torch.bfloat16:
+        assert torch.allclose(expected, out, atol=1e-1)
+        assert torch.allclose(expected_grad_value, value.grad, atol=1e-1)
+        assert torch.allclose(expected_grad_other, other.grad, atol=1e-1)
+    else:
+        assert torch.allclose(expected, out)
+        assert torch.allclose(expected_grad_value, value.grad)
+        assert torch.allclose(expected_grad_other, other.grad)


 @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
 def test_spspmm(dtype, device):
+    if device == torch.device('cuda:0') and dtype == torch.bfloat16:
+        return  # Not yet implemented.
+
     src = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtype,
                        device=device)
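The reworked asserts in test_spmm widen the comparison tolerance for reduced-precision dtypes (float16 and bfloat16) and keep the torch.allclose defaults otherwise. A standalone sketch of that pattern, with illustrative tensors rather than the test's actual fixtures:

import torch

def assert_close(expected: torch.Tensor, out: torch.Tensor) -> None:
    # Looser absolute tolerance for half precision, default tolerances
    # otherwise, following the structure of the updated asserts above.
    if out.dtype in (torch.float16, torch.bfloat16):
        assert torch.allclose(expected, out, atol=1e-1)
    else:
        assert torch.allclose(expected, out)

x = torch.randn(10, 8, dtype=torch.float16)
assert_close(x, x + 0.05)  # a 0.05 offset fits inside the relaxed atol of 1e-1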
test/test_spspmm.py (view file @ 1bf12762)

@@ -2,13 +2,17 @@ from itertools import product
 import pytest
 import torch
-from torch_sparse import spspmm, SparseTensor
-from .utils import grad_dtypes, devices, tensor
+from torch_sparse import SparseTensor, spspmm
+
+from .utils import devices, grad_dtypes, tensor


 @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
 def test_spspmm(dtype, device):
+    if device == torch.device('cuda:0') and dtype == torch.bfloat16:
+        return  # Not yet implemented.
+
     indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
     valueA = tensor([1, 2, 3, 4, 5], dtype, device)
     indexB = torch.tensor([[0, 2], [1, 0]], device=device)
@@ -21,6 +25,9 @@ def test_spspmm(dtype, device):
 @pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
 def test_sparse_tensor_spspmm(dtype, device):
+    if device == torch.device('cuda:0') and dtype == torch.bfloat16:
+        return  # Not yet implemented.
+
     x = SparseTensor(
         row=torch.tensor(
             [0, 1, 1, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9],
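test_spspmm exercises sparse-sparse matrix multiplication on COO inputs. A minimal, self-contained sketch of that call follows, with the signature assumed from the public torch_sparse API, spspmm(indexA, valueA, indexB, valueB, m, k, n) for an (m x k) @ (k x n) product, and small illustrative matrices that are not the test's data:

import torch
from torch_sparse import spspmm

# A is 2 x 3 with A[0, 0] = 1 and A[1, 2] = 2.
indexA = torch.tensor([[0, 1], [0, 2]])
valueA = torch.tensor([1.0, 2.0])
# B is 3 x 2 with B[0, 1] = 3 and B[2, 0] = 4.
indexB = torch.tensor([[0, 2], [1, 0]])
valueB = torch.tensor([3.0, 4.0])

# C = A @ B is 2 x 2 with C[0, 1] = 1 * 3 = 3 and C[1, 0] = 2 * 4 = 8.
indexC, valueC = spspmm(indexA, valueA, indexB, valueB, 2, 3, 2)
print(indexC.tolist(), valueC.tolist())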
torch_sparse/cat.py (view file @ 1bf12762)

-from typing import Optional, List, Tuple
+from typing import Optional, List, Tuple  # noqa

 import torch
 from torch_sparse.storage import SparseStorage
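The cat.py change only appends "# noqa" to the typing import. As a hedged illustration of why that can be necessary: when typing names are referenced solely inside type comments (the TorchScript-compatible annotation style), some linter configurations flag the import as unused (flake8 F401), and "# noqa" silences that warning. A generic example, not code from this repository:

from typing import List, Optional  # noqa

def first(xs):
    # type: (List[int]) -> Optional[int]
    # List and Optional appear only in the type comment above, which some
    # linters do not scan, hence the "# noqa" on the import line.
    return xs[0] if xs else None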