Commit 19fd8251 authored by limm

support v0.6.16

parent 9ccee9c0
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse import SparseTensor, add
-from .utils import dtypes, devices, tensor
+from torch_sparse import SparseTensor, add
+from torch_sparse.testing import devices, dtypes, tensor
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
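Note: the recurring change in this commit is that shared test helpers move from the repo-local `.utils` module to the installable `torch_sparse.testing` package, so the suite no longer depends on relative imports. A rough sketch of what such a helper module exposes, inferred only from the names imported above (the bodies here are assumptions, for illustration):

    import torch

    reductions = ['sum', 'add', 'mean', 'min', 'max']
    dtypes = [torch.half, torch.bfloat16, torch.float, torch.double,
              torch.int, torch.long]
    grad_dtypes = [torch.half, torch.bfloat16, torch.float, torch.double]

    devices = [torch.device('cpu')]
    if torch.cuda.is_available():
        devices += [torch.device('cuda:0')]

    def tensor(x, dtype, device):
        # Build test tensors on the requested dtype/device; None passes through.
        return None if x is None else torch.tensor(x, dtype=dtype, device=device)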
......
import pytest
import torch
-from torch_sparse.tensor import SparseTensor
-from torch_sparse.cat import cat
-from .utils import devices, tensor
+from torch_sparse.cat import cat
+from torch_sparse.tensor import SparseTensor
+from torch_sparse.testing import devices, tensor
@pytest.mark.parametrize('device', devices)
......
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse.tensor import SparseTensor
-from .utils import dtypes, devices, tensor
+from torch_sparse.tensor import SparseTensor
+from torch_sparse.testing import devices, dtypes, tensor
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
......
from itertools import product
import pytest
-from torch_sparse.tensor import SparseTensor
-from .utils import dtypes, devices
+from torch_sparse.tensor import SparseTensor
+from torch_sparse.testing import devices, dtypes
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
......
@@ -3,15 +3,18 @@ from itertools import product
import pytest
import torch
import torch_scatter
-from torch_sparse.matmul import matmul
-from torch_sparse.tensor import SparseTensor
-from .utils import devices, grad_dtypes, reductions
+from torch_sparse.matmul import matmul, spspmm
+from torch_sparse.tensor import SparseTensor
+from torch_sparse.testing import devices, grad_dtypes, reductions
@pytest.mark.parametrize('dtype,device,reduce',
                         product(grad_dtypes, devices, reductions))
def test_spmm(dtype, device, reduce):
+    if device == torch.device('cuda:0') and dtype == torch.bfloat16:
+        return  # Not yet implemented.
    src = torch.randn((10, 8), dtype=dtype, device=device)
    src[2:4, :] = 0  # Remove multiple rows.
    src[:, 2:4] = 0  # Remove multiple columns.
@@ -39,13 +42,20 @@ def test_spmm(dtype, device, reduce):
    out = matmul(src, other, reduce)
    out.backward(grad_out)
-    assert torch.allclose(expected, out, atol=1e-2)
-    assert torch.allclose(expected_grad_value, value.grad, atol=1e-2)
-    assert torch.allclose(expected_grad_other, other.grad, atol=1e-2)
+    atol = 1e-7
+    if dtype == torch.float16 or dtype == torch.bfloat16:
+        atol = 1e-1
+    assert torch.allclose(expected, out, atol=atol)
+    assert torch.allclose(expected_grad_value, value.grad, atol=atol)
+    assert torch.allclose(expected_grad_other, other.grad, atol=atol)
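Note: the dtype-dependent tolerance introduced above matches the precision these dtypes can actually deliver: float16 and bfloat16 have a machine epsilon several orders of magnitude coarser than float32, so a fixed atol of 1e-7 would fail spuriously. A quick way to see the gap with stock PyTorch (illustrative, not part of the commit):

    import torch

    # eps is the smallest relative step each dtype resolves; fp16/bf16 carry
    # far fewer mantissa bits, hence the much looser atol in the test above.
    for dt in (torch.float32, torch.float16, torch.bfloat16):
        print(dt, torch.finfo(dt).eps)
    # float32 ~1.2e-07, float16 ~9.8e-04, bfloat16 ~7.8e-03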
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_spspmm(dtype, device):
+    if dtype in {torch.half, torch.bfloat16}:
+        return  # Not yet implemented.
    src = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=dtype,
                       device=device)
@@ -65,3 +75,5 @@ def test_spspmm(dtype, device):
    rowptr, col, value = out.csr()
    assert rowptr.tolist() == [0, 1, 2, 3]
    assert col.tolist() == [0, 1, 2]
+
+    torch.jit.script(spspmm)
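Note: `torch.jit.script(spspmm)` needs no inputs; scripting alone parses and type-checks the function, so the test fails immediately if `spspmm` ever stops being TorchScript-compatible. The same smoke test works on any annotated function; a minimal stand-in (hypothetical, not from this repo):

    import torch

    def scale(x: torch.Tensor, alpha: float) -> torch.Tensor:
        # Type annotations give TorchScript the signatures it needs.
        return x * alpha

    scripted = torch.jit.script(scale)  # raises here if not scriptable
    assert torch.equal(scripted(torch.ones(3), 2.0), torch.full((3, ), 2.0))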
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse.tensor import SparseTensor
-from .utils import devices
+from torch_sparse.tensor import SparseTensor
+from torch_sparse.testing import devices
try:
    rowptr = torch.tensor([0, 1])
......
import pytest
import torch
-from torch_sparse.tensor import SparseTensor
-from .utils import devices, tensor
+from torch_sparse.tensor import SparseTensor
+from torch_sparse.testing import devices, tensor
@pytest.mark.parametrize('device', devices)
......
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse import spmm
-from .utils import dtypes, devices, tensor
+from torch_sparse import spmm
+from torch_sparse.testing import devices, dtypes, tensor
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
......
@@ -2,13 +2,16 @@ from itertools import product
import pytest
import torch
-from torch_sparse import spspmm, SparseTensor
-from .utils import grad_dtypes, devices, tensor
+from torch_sparse import SparseTensor, spspmm
+from torch_sparse.testing import devices, grad_dtypes, tensor
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_spspmm(dtype, device):
+    if dtype in {torch.half, torch.bfloat16}:
+        return  # Not yet implemented.
    indexA = torch.tensor([[0, 0, 1, 2, 2], [1, 2, 0, 0, 1]], device=device)
    valueA = tensor([1, 2, 3, 4, 5], dtype, device)
    indexB = torch.tensor([[0, 2], [1, 0]], device=device)
@@ -21,6 +24,9 @@ def test_spspmm(dtype, device):
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
def test_sparse_tensor_spspmm(dtype, device):
+    if dtype in {torch.half, torch.bfloat16}:
+        return  # Not yet implemented.
    x = SparseTensor(
        row=torch.tensor(
            [0, 1, 1, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 9],
@@ -35,7 +41,7 @@ def test_sparse_tensor_spspmm(dtype, device):
        ], dtype=dtype, device=device),
    )
-    expected = torch.eye(10, dtype=dtype, device=device)
+    expected = torch.eye(10, device=device).to(dtype)
    out = x @ x.to_dense().t()
    assert torch.allclose(out, expected, atol=1e-2)
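Note: building the identity in the default dtype and casting afterwards (`torch.eye(10, device=device).to(dtype)`) is the portable route when `torch.eye` cannot materialize a dtype directly on a given backend; that this motivated the change is an inference, not stated in the commit. A minimal sketch:

    import torch

    # Portable: construct in float32 first, then cast to the target dtype.
    expected = torch.eye(10).to(torch.bfloat16)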
......
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse.storage import SparseStorage
-from .utils import dtypes, devices, tensor
+from torch_sparse.storage import SparseStorage
+from torch_sparse.testing import devices, dtypes, tensor
@pytest.mark.parametrize('device', devices)
......
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse import SparseTensor
-from .utils import grad_dtypes, devices
+from torch_sparse import SparseTensor
+from torch_sparse.testing import devices, grad_dtypes
@pytest.mark.parametrize('dtype,device', product(grad_dtypes, devices))
@@ -15,8 +15,8 @@ def test_getitem(dtype, device):
    mat = torch.randn(m, n, dtype=dtype, device=device)
    mat = SparseTensor.from_dense(mat)
-    idx1 = torch.randint(0, m, (k,), dtype=torch.long, device=device)
-    idx2 = torch.randint(0, n, (k,), dtype=torch.long, device=device)
+    idx1 = torch.randint(0, m, (k, ), dtype=torch.long, device=device)
+    idx2 = torch.randint(0, n, (k, ), dtype=torch.long, device=device)
    bool1 = torch.zeros(m, dtype=torch.bool, device=device)
    bool2 = torch.zeros(n, dtype=torch.bool, device=device)
    bool1.scatter_(0, idx1, 1)
......
@@ -2,9 +2,9 @@ from itertools import product
import pytest
import torch
-from torch_sparse import transpose
-from .utils import dtypes, devices, tensor
+from torch_sparse import transpose
+from torch_sparse.testing import devices, dtypes, tensor
@pytest.mark.parametrize('dtype,device', product(dtypes, devices))
......
* linguist-vendored
*.cc linguist-vendored=false
name: Linux
on:
  push:
  pull_request:
    branches: [ master ]
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [ubuntu-latest]
        compiler: [g++, clang++]
        flags: [-std=c++11, -std=c++17]
        optimize: [-O2]
    steps:
      - name: Checkout
        uses: actions/checkout@v2.0.0
      - name: Build and test
        env:
          CXX: ${{ matrix.compiler }}
          CXXFLAGS: ${{ matrix.flags }} ${{ matrix.optimize }}
        run: |
          mkdir build && cd build && cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Release .. && cmake --build . && make test
name: MacOS
on:
  push:
  pull_request:
    branches: [ master ]
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [macos-latest]
        compiler: [g++, clang++]
        flags: [-std=c++11, -std=c++17]
        optimize: [-O2]
    steps:
      - name: Checkout
        uses: actions/checkout@v2.0.0
      - name: Build and test
        env:
          CXX: ${{ matrix.compiler }}
          CXXFLAGS: ${{ matrix.flags }} ${{ matrix.optimize }}
        run: |
          mkdir build && cd build && cmake -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Release .. && cmake --build . && make test
name: Windows
on:
  push:
  pull_request:
    branches: [ master ]
jobs:
  build:
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
        os: [windows-latest]
        flags: ["/std:c++11", "/std:c++latest"]
        optimize: [/O2]
    steps:
      - name: Checkout
        uses: actions/checkout@v2.0.0
      - name: Build and test
        env:
          CXX: ${{ matrix.compiler }}
          CXXFLAGS: ${{ matrix.flags }} ${{ matrix.optimize }}
          CTEST_OUTPUT_ON_FAILURE: 1
        run: |
          cmake -Bbuild -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON -DCMAKE_BUILD_TYPE=Release && cmake --build build --target ALL_BUILD && cmake --build build --target RUN_TESTS
VagrantFile
benchmark/build
benchmark/output
benchmark/charts.html
build*
.vagrant
.cache
.gdb_history
compile_commands.json
**/.vscode
TAGS
#[===================================================================[
parallel-hashmap library by Gregory Popovitch
CMake projects that wish to use this library may do
something like :
include(FetchContent)
FetchContent_Declare(
    parallel-hashmap
    GIT_REPOSITORY https://github.com/greg7mdp/parallel-hashmap.git
    GIT_TAG v1.3.12 # adjust tag/branch/commit as needed
)
FetchContent_MakeAvailable(parallel-hashmap)
...
include_directories(${parallel-hashmap_SOURCE_DIR})
#]===================================================================]
cmake_minimum_required(VERSION 3.8)
+list (APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
+include(DetectVersion)
-cmake_policy(SET CMP0048 NEW) ## set VERSION as documented by the project() command.
-cmake_policy(SET CMP0076 NEW) ## accept new policy
-set(CMAKE_CXX_STANDARD 11) ## compile with C++11 support
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
+cmake_policy(SET CMP0048 NEW) ## set VERSION as documented by the project() command.
+cmake_policy(SET CMP0076 NEW) ## accept new policy
+if(NOT CMAKE_CXX_STANDARD)
+    set(CMAKE_CXX_STANDARD 11) ## compile with C++11 support
+endif()
+if(NOT CMAKE_CXX_STANDARD_REQUIRED)
+    set(CMAKE_CXX_STANDARD_REQUIRED ON)
+endif()
+if(NOT DEFINED PHMAP_MASTER_PROJECT)
+    set(PHMAP_MASTER_PROJECT OFF)
+    if(CMAKE_CURRENT_SOURCE_DIR STREQUAL CMAKE_SOURCE_DIR)
+        set(PHMAP_MASTER_PROJECT ON)
+    endif()
+endif()
project(phmap VERSION ${DETECTED_PHMAP_VERSION} LANGUAGES CXX)
## ----------------------------- options -----------------------------
+option(PHMAP_INSTALL "Enable installation" ${PHMAP_MASTER_PROJECT})
set(PHMAP_DIR parallel_hashmap)
set(PHMAP_HEADERS ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap.h
                  ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_base.h
                  ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_bits.h
+                 ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_config.h
                  ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_dump.h
                  ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_fwd_decl.h
                  ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_utils.h
-                 ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/phmap_config.h)
-set(CMAKE_SUPPRESS_REGENERATION true) ## suppress ZERO_CHECK project
+                 ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/meminfo.h
+                 ${CMAKE_CURRENT_SOURCE_DIR}/${PHMAP_DIR}/btree.h)
-include(GNUInstallDirs)
-include(CMakePackageConfigHelpers)
include(helpers)
include_directories("${CMAKE_CURRENT_SOURCE_DIR}")
add_library(${PROJECT_NAME} INTERFACE)
@@ -34,99 +69,110 @@ target_include_directories(
    $<BUILD_INTERFACE:${PROJECT_SOURCE_DIR}>
    $<INSTALL_INTERFACE:include>)
-install(
-    DIRECTORY ${PROJECT_SOURCE_DIR}/${PHMAP_DIR}/
-    DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PHMAP_DIR})
-install(TARGETS ${PROJECT_NAME}
-        EXPORT ${PROJECT_NAME}-targets)
-export(EXPORT ${PROJECT_NAME}-targets
-       FILE "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Targets.cmake")
+if(PHMAP_INSTALL)
+    include(GNUInstallDirs)
+    include(CMakePackageConfigHelpers)
+    install(
+        DIRECTORY ${PROJECT_SOURCE_DIR}/${PHMAP_DIR}/
+        DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/${PHMAP_DIR})
+    install(TARGETS ${PROJECT_NAME}
+            EXPORT ${PROJECT_NAME}-targets)
+    export(EXPORT ${PROJECT_NAME}-targets
+           FILE "${CMAKE_CURRENT_BINARY_DIR}/${PROJECT_NAME}Targets.cmake")
+endif()
## ------------------------- building tests and examples -------------
-option(PHMAP_BUILD_TESTS "Whether or not to build the tests" ON)
-option(PHMAP_BUILD_EXAMPLES "Whether or not to build the examples" ON)
+option(PHMAP_BUILD_TESTS "Whether or not to build the tests" ${PHMAP_MASTER_PROJECT})
+option(PHMAP_BUILD_EXAMPLES "Whether or not to build the examples" ${PHMAP_MASTER_PROJECT})
if(MSVC)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /bigobj")
endif()
-if (PHMAP_BUILD_TESTS)
-    include(cmake/DownloadGTest.cmake)
+if (PHMAP_BUILD_TESTS OR PHMAP_BUILD_EXAMPLES)
+    include_directories(${PROJECT_SOURCE_DIR})
+endif()
+if (PHMAP_BUILD_TESTS)
-    check_target(gtest)
-    check_target(gtest_main)
-    check_target(gmock)
+    if (NOT PHMAP_GTEST_LIBS)
+        include(cmake/DownloadGTest.cmake)
+        check_target(gtest)
+        check_target(gtest_main)
+        check_target(gmock)
+        set(PHMAP_GTEST_LIBS gmock_main)
+    endif()
    enable_testing()
    ## ---------------- regular hash maps ----------------------------
    phmap_cc_test(NAME compressed_tuple SRCS "tests/compressed_tuple_test.cc"
        DEPS gmock_main)
    phmap_cc_test(NAME container_memory SRCS "tests/container_memory_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME hash_policy_testing SRCS "tests/hash_policy_testing_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME node_hash_policy SRCS "tests/node_hash_policy_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME raw_hash_set SRCS "tests/raw_hash_set_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME raw_hash_set_allocator SRCS "tests/raw_hash_set_allocator_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    ## ---------------- regular hash maps ----------------------------
    phmap_cc_test(NAME flat_hash_set SRCS "tests/flat_hash_set_test.cc"
-        COPTS "-DUNORDERED_SET_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_SET_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME flat_hash_map SRCS "tests/flat_hash_map_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME node_hash_map SRCS "tests/node_hash_map_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME node_hash_set SRCS "tests/node_hash_set_test.cc"
-        COPTS "-DUNORDERED_SET_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_SET_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    ## --------------- parallel hash maps -----------------------------------------------
    phmap_cc_test(NAME parallel_flat_hash_map SRCS "tests/parallel_flat_hash_map_test.cc"
-        COPTS "-DUNORDERED_MAP_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_MAP_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME parallel_flat_hash_set SRCS "tests/parallel_flat_hash_set_test.cc"
-        COPTS "-DUNORDERED_SET_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_SET_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME parallel_node_hash_map SRCS "tests/parallel_node_hash_map_test.cc"
-        DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME parallel_node_hash_set SRCS "tests/parallel_node_hash_set_test.cc"
-        COPTS "-DUNORDERED_SET_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_SET_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME parallel_flat_hash_map_mutex SRCS "tests/parallel_flat_hash_map_mutex_test.cc"
-        COPTS "-DUNORDERED_MAP_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_MAP_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME dump_load SRCS "tests/dump_load_test.cc"
-        COPTS "-DUNORDERED_MAP_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_MAP_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    phmap_cc_test(NAME erase_if SRCS "tests/erase_if_test.cc"
-        COPTS "-DUNORDERED_MAP_CXX17" DEPS gmock_main)
+        COPTS "-DUNORDERED_MAP_CXX17" DEPS ${PHMAP_GTEST_LIBS})
    ## --------------- btree -----------------------------------------------
    phmap_cc_test(NAME btree SRCS "tests/btree_test.cc"
-        CLOPTS "-w" DEPS gmock_main)
+        DEPS ${PHMAP_GTEST_LIBS})
endif()
if (PHMAP_BUILD_EXAMPLES)
    if(NOT MSVC)
-        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic -Wall -Wextra -Wcast-align -Wcast-qual -Wdisabled-optimization -Winit-self -Wlogical-op -Wmissing-include-dirs -Woverloaded-virtual -Wredundant-decls -Wshadow -Wstrict-null-sentinel -Wswitch-default -Wno-unused -Wno-unknown-warning-option -Wno-gnu-zero-variadic-macro-arguments")
+        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic -Wall -Wextra -Wcast-align -Wcast-qual -Wdisabled-optimization -Winit-self -Wlogical-op -Wmissing-include-dirs -Woverloaded-virtual -Wredundant-decls -Wshadow -Wstrict-null-sentinel -Wswitch-default -Wno-unused")
+        if (NOT CMAKE_COMPILER_IS_GNUCC OR CMAKE_CXX_COMPILER_VERSION VERSION_GREATER 5.0)
+            set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unknown-warning-option -Wno-gnu-zero-variadic-macro-arguments")
+        endif()
    else()
        set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /Zc:__cplusplus")
    endif()
@@ -142,15 +188,19 @@ if (PHMAP_BUILD_EXAMPLES)
        add_executable(ex_lazy_emplace_l examples/lazy_emplace_l.cc phmap.natvis)
    endif()
    add_executable(ex_serialize examples/serialize.cc phmap.natvis)
-    target_include_directories(ex_serialize PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../cereal/include>)
+    #target_include_directories(ex_serialize PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../cereal/include>)
    add_executable(ex_hash_std examples/hash_std.cc phmap.natvis)
    add_executable(ex_hash_value examples/hash_value.cc phmap.natvis)
    add_executable(ex_hash examples/hash.cc phmap.natvis)
    add_executable(ex_two_files examples/f1.cc examples/f2.cc phmap.natvis)
    add_executable(ex_insert_bench examples/insert_bench.cc phmap.natvis)
    add_executable(ex_knucleotide examples/knucleotide.cc phmap.natvis)
    add_executable(ex_dump_load examples/dump_load.cc phmap.natvis)
    add_executable(ex_btree examples/btree.cc phmap.natvis)
    add_executable(ex_hash_bench examples/hash_bench.cc phmap.natvis)
    add_executable(ex_matt examples/matt.cc phmap.natvis)
    add_executable(ex_mt_word_counter examples/mt_word_counter.cc phmap.natvis)
    add_executable(ex_p_bench examples/p_bench.cc phmap.natvis)
    target_link_libraries(ex_knucleotide Threads::Threads)
    target_link_libraries(ex_bench Threads::Threads)
......
@@ -25,7 +25,7 @@ This repository aims to provide a set of excellent **hash map** implementations,
- **Dump/load** feature: when a `flat` hash map stores data that is `std::trivially_copyable`, the table can be dumped to disk and restored as a single array, very efficiently, and without requiring any hash computation. This is typically about 10 times faster than doing element-wise serialization to disk, but it will use 10% to 60% extra disk space. See `examples/serialize.cc`. _(flat hash map/set only)_
-- **Tested** on Windows (vs2015 & vs2017, vs2019, Intel compiler 18 and 19), linux (g++ 4.8.4, 5, 6, 7, 8, clang++ 3.9, 4.0, 5.0) and MacOS (g++ and clang++) - click on travis and appveyor icons above for detailed test status.
+- **Tested** on Windows (vs2015 & vs2017, vs2019, vs2022, Intel compiler 18 and 19), linux (g++ 4.8, 5, 6, 7, 8, 9, 10, 11, 12, clang++ 3.9 to 16) and MacOS (g++ and clang++) - click on travis and appveyor icons above for detailed test status.
- Automatic support for **boost's hash_value()** method for providing the hash function (see `examples/hash_value.h`). Also default hash support for `std::pair` and `std::tuple`.
......