OpenDAS / torch-cluster

Commit 15aa4bb7, authored Apr 07, 2018 by rusty1s
removed old code
parent e094c3bf
Showing 9 changed files with 0 additions and 241 deletions:

torch_cluster/functions/__init__.py            +0  -0
torch_cluster/functions/grid.py                +0  -97
torch_cluster/functions/normalized_cut.py      +0  -17
torch_cluster/functions/serial.py              +0  -23
torch_cluster/functions/utils/__init__.py      +0  -0
torch_cluster/functions/utils/consecutive.py   +0  -34
torch_cluster/functions/utils/degree.py        +0  -8
torch_cluster/functions/utils/ffi.py           +0  -33
torch_cluster/functions/utils/permute.py       +0  -29
torch_cluster/functions/__init__.py (deleted, 100644 → 0; empty file)
torch_cluster/functions/grid.py (deleted, 100644 → 0)

from __future__ import division

import torch

from .utils.ffi import _get_typed_func
from .utils.consecutive import consecutive


def _preprocess(position, size, batch=None, start=None):
    size = size.type_as(position)

    # Allow one-dimensional positions.
    if position.dim() == 1:
        position = position.unsqueeze(-1)

    assert size.dim() == 1, 'Size tensor must be one-dimensional'
    assert position.size(-1) == size.size(-1), (
        'Last dimension of position tensor must have same size as size '
        'tensor')

    # Translate to minimal positive positions if no start was passed.
    if start is None:
        min = []
        for i in range(position.size(-1)):
            min.append(position[:, i].min())
        start = position.new(min)
        position = position - position.new(min)
    else:
        assert start.numel() == size.numel(), (
            'Start tensor must have same size as size tensor')
        position = position - start.type_as(position)

    # If given, append batch to position tensor.
    if batch is not None:
        batch = batch.unsqueeze(-1).type_as(position)
        assert position.size()[:-1] == batch.size()[:-1], (
            'Position tensor must have same size as batch tensor apart from '
            'the last dimension')
        position = torch.cat([batch, position], dim=-1)
        size = torch.cat([size.new(1).fill_(1), size], dim=-1)

    return position, size, start


def _minimal_cluster_size(position, size):
    max = []
    for i in range(position.size(-1)):
        max.append(position[:, i].max())
    cluster_size = (size.new(max) / size).long() + 1
    return cluster_size


def _fixed_cluster_size(position, size, start, batch=None, end=None):
    if end is None:
        return _minimal_cluster_size(position, size)

    end = end.type_as(size) - start.type_as(size)
    eps = 0.000001

    # Simulate [start, end) interval.
    if batch is None:
        cluster_size = ((end / size).float() - eps).long() + 1
    else:
        cluster_size = ((end / size[1:]).float() - eps).long() + 1
        max_batch = cluster_size.new(1).fill_(batch.max() + 1)
        cluster_size = torch.cat([max_batch, cluster_size], dim=0)

    return cluster_size


def _grid_cluster(position, size, cluster_size):
    C = cluster_size.prod()
    cluster = cluster_size.new(torch.Size(list(position.size())[:-1]))
    cluster = cluster.unsqueeze(dim=-1)
    func = _get_typed_func('grid', position)
    func(C, cluster, position, size, cluster_size)
    cluster = cluster.squeeze(dim=-1)
    return cluster, C


def sparse_grid_cluster(position, size, batch=None, start=None):
    position, size, start = _preprocess(position, size, batch, start)
    cluster_size = _minimal_cluster_size(position, size)
    cluster, C = _grid_cluster(position, size, cluster_size)
    cluster = consecutive(cluster)

    if batch is None:
        return cluster
    else:
        # batch = u / (C // cluster_size[0])
        return cluster, batch


def dense_grid_cluster(position, size, batch=None, start=None, end=None):
    position, size, start = _preprocess(position, size, batch, start)
    cluster_size = _fixed_cluster_size(position, size, start, batch, end)
    cluster, C = _grid_cluster(position, size, cluster_size)
    return cluster, C
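For reference, a minimal sketch of the voxel-grid semantics that the deleted sparse_grid_cluster implemented, written in plain PyTorch so it runs without the compiled FFI kernel. The toy values and the row-major flattening are assumptions for illustration, not the kernel's verified contract:

import torch

position = torch.tensor([[0.2, 0.4], [1.7, 0.3], [2.1, 1.9]])
size = torch.tensor([1.0, 1.0])                    # voxel extent per dimension

pos = position - position.min(dim=0)[0]            # translate to positive coords
num_cells = (pos.max(dim=0)[0] / size).long() + 1  # cells per dimension
coords = (pos / size).long()                       # integer cell per point

# Flatten the 2-D cell coordinates row-major into one flat id per point.
cluster = coords[:, 0] * num_cells[1] + coords[:, 1]

# Relabel to consecutive ids 0..k-1, as `consecutive` did.
_, cluster = torch.unique(cluster, return_inverse=True)
print(cluster)  # tensor([0, 1, 2]) -- one cluster id per point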
torch_cluster/functions/normalized_cut.py (deleted, 100644 → 0)

from __future__ import division

import torch


def normalized_cut(edge_index, num_nodes, degree, edge_attr=None):
    row, col = edge_index
    cut = 1 / degree
    cut = cut[row] + cut[col]

    if edge_attr is None:
        return cut
    else:
        if edge_attr.dim() > 1 and edge_attr.size(1) > 1:
            edge_attr = torch.norm(edge_attr, 2, 1)
        return edge_attr.squeeze() * cut
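This file is plain PyTorch, so a toy check still runs as-is; the 3-node path graph below is an illustration, not from the original test suite:

import torch

# Undirected path 0 - 1 - 2, stored as directed edge pairs.
edge_index = torch.tensor([[0, 1, 1, 2],
                           [1, 0, 2, 1]])
degree = torch.tensor([1.0, 2.0, 1.0])

row, col = edge_index
cut = (1 / degree)[row] + (1 / degree)[col]
print(cut)  # tensor([1.5000, 1.5000, 1.5000, 1.5000])
# Same result as normalized_cut(edge_index, 3, degree) above.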
torch_cluster/functions/serial.py (deleted, 100644 → 0)

from .utils.permute import permute
from .utils.degree import node_degree
from .utils.ffi import _get_func
from .utils.consecutive import consecutive


def serial_cluster(edge_index, batch=None, num_nodes=None):
    num_nodes = edge_index.max() + 1 if num_nodes is None else num_nodes
    row, col = permute(edge_index, num_nodes)
    degree = node_degree(row, num_nodes, out=row.new())
    cluster = edge_index.new(num_nodes).fill_(-1)
    func = _get_func('random', cluster)
    func(cluster, row, col, degree)
    cluster, u = consecutive(cluster)

    if batch is None:
        return cluster
    else:
        # TODO: Fix
        return cluster, batch
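The actual matching happens inside the FFI kernel resolved by _get_func('random', ...). As a rough reference only, here is a pure-Python sketch of one plausible greedy random matching; this is an assumption about the kernel's semantics, not a verified port:

import torch

def greedy_random_matching(row, col, num_nodes):
    # Visit nodes in random order; pair each unmatched node with its
    # first unmatched neighbour, giving both the same cluster id.
    cluster = torch.full((num_nodes,), -1, dtype=torch.long)
    next_id = 0
    for u in torch.randperm(num_nodes).tolist():
        if cluster[u] >= 0:
            continue
        cluster[u] = next_id
        for v in col[row == u].tolist():
            if cluster[v] < 0:
                cluster[v] = next_id
                break
        next_id += 1
    return cluster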
torch_cluster/functions/utils/__init__.py (deleted, 100644 → 0; empty file)
torch_cluster/functions/utils/consecutive.py (deleted, 100644 → 0)

import torch
from torch_unique import unique


def _get_type(max_value, cuda):
    if max_value <= 255:
        return torch.cuda.ByteTensor if cuda else torch.ByteTensor
    elif max_value <= 32767:  # pragma: no cover
        return torch.cuda.ShortTensor if cuda else torch.ShortTensor
    elif max_value <= 2147483647:  # pragma: no cover
        return torch.cuda.IntTensor if cuda else torch.IntTensor
    else:  # pragma: no cover
        return torch.cuda.LongTensor if cuda else torch.LongTensor


def consecutive(x):
    initial_size = x.size()

    # Compute unique vector.
    u = unique(x.view(-1))

    # Compute mask with mask[u[0]] = 0, mask[u[1]] = 1, ...
    # As mask can get very big (dependent on the maximum value in `x`), we
    # want it to take the least possible amount of space in memory
    # (`_get_type`).
    max_value = u[-1] + 1
    mask = _get_type(u.size(0), x.is_cuda)(max_value)
    mask[u] = torch.arange(0, u.size(0), out=mask.new())

    # Select the values in `mask` based on `x` and reshape to initial size.
    x = mask[x.view(-1)]
    x = x.view(initial_size)
    x = x.long()
    return x
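For orientation, the same relabeling is a one-liner in current PyTorch; torch.unique with return_inverse=True produces identical consecutive ids (a modern equivalent, not part of this codebase):

import torch

x = torch.tensor([7, 3, 7, 9, 3])
_, inverse = torch.unique(x, sorted=True, return_inverse=True)
print(inverse)  # tensor([1, 0, 1, 2, 0]) -- same relabeling as consecutive(x)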
torch_cluster/functions/utils/degree.py (deleted, 100644 → 0)

import torch


def node_degree(index, num_nodes, out=None):
    out = index.new(num_nodes) if out is None else out
    zero = torch.zeros(num_nodes, out=out)
    one = torch.ones(index.size(0), out=zero.new())
    return zero.scatter_add_(0, index, one)
torch_cluster/functions/utils/ffi.py (deleted, 100644 → 0)

from ..._ext import ffi


def _get_func(name, tensor):
    cuda = '_cuda' if tensor.is_cuda else ''
    return getattr(ffi, 'cluster_{}{}'.format(name, cuda))


def _get_typed_func(name, tensor):
    typename = type(tensor).__name__.replace('Tensor', '')
    cuda = 'cuda_' if tensor.is_cuda else ''
    return getattr(ffi, 'cluster_{}_{}{}'.format(name, cuda, typename))


def ffi_serial(row, col, degree, weight=None):
    output = row.new(degree.size(0)).fill_(-1)

    if weight is None:
        func = _get_func('serial', row)
        func(output, row, col, degree)
        return output
    else:
        func = _get_typed_func('serial', weight)
        func(output, row, col, degree, weight)
        return output


def ffi_grid(position, size, count):
    C = count.prod()
    output = count.new(position.size(0), 1)
    func = _get_typed_func('grid', position)
    func(C, output, position, size, count)
    output = output.squeeze(-1)
    return output
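The dispatch is pure string composition: type(tensor).__name__ was 'FloatTensor' for a CPU float tensor in this PyTorch era, so the lookups resolve roughly as sketched below. The symbol names are reconstructed from the format strings above, not checked against the built extension:

name, typename = 'grid', 'Float'
print('cluster_{}_{}{}'.format(name, '', typename))       # cluster_grid_Float
print('cluster_{}_{}{}'.format(name, 'cuda_', typename))  # cluster_grid_cuda_Float
print('cluster_{}{}'.format('random', '_cuda'))           # cluster_random_cuda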
torch_cluster/functions/utils/permute.py (deleted, 100644 → 0)

import torch


def sort(row, col):
    row, perm = row.sort()
    col = col[perm]
    return row, col


def permute(row, col, num_nodes, node_rid=None, edge_rid=None):
    num_edges = row.size(0)

    # Randomly reorder row and column indices.
    if edge_rid is None:
        edge_rid = torch.randperm(num_edges).type_as(row)
    row, col = row[edge_rid], col[edge_rid]

    # Randomly change row indices to new values.
    if node_rid is None:
        node_rid = torch.randperm(num_nodes).type_as(row)
    row = node_rid[row]

    # Sort row and column indices based on changed values.
    row, col = sort(row, col)

    # Revert previous row value changes to old indices.
    row = node_rid.sort()[1][row]

    return row, col
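A small usage sketch of permute, with toy edges assumed and the function above in scope; after the relabel-sort-revert round trip, each node's edges stay contiguous in the result, but the order in which nodes appear is randomized:

import torch

row = torch.tensor([0, 0, 1, 2])
col = torch.tensor([1, 2, 0, 0])
new_row, new_col = permute(row, col, num_nodes=3)
# new_row keeps the original node ids, grouped per node
# (e.g. tensor([1, 2, 0, 0])), with node order and within-node
# edge order randomized by node_rid and edge_rid.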