Commit 6d2704b2 authored by rusty1s

clean up

parent bcf3a398
 #include <Python.h>
 #include <torch/script.h>
-#include "cpu/scatter_cpu.h"
-#include "utils.h"
-#ifdef WITH_CUDA
-#include "cuda/scatter_cuda.h"
-#endif
+// #include "cpu/scatter_cpu.h"
+// #include "utils.h"
+// #ifdef WITH_CUDA
+// #include <cuda.h>
+// #include "cuda/scatter_cuda.h"
+// #endif
 
 #ifdef _WIN32
-#if PY_MAJOR_VERSION < 3
-PyMODINIT_FUNC init_C(void) { return NULL; }
-#else
 PyMODINIT_FUNC PyInit__C(void) { return NULL; }
 #endif
-#endif
 
 std::tuple<torch::Tensor, torch::optional<torch::Tensor>>
 scatter_fw(torch::Tensor src, torch::Tensor index, int64_t dim,
            torch::optional<torch::Tensor> optional_out,
            torch::optional<int64_t> dim_size, std::string reduce) {
-  if (src.device().is_cuda()) {
-#ifdef WITH_CUDA
-    return scatter_cuda(src, index, dim, optional_out, dim_size, reduce);
-#else
-    AT_ERROR("Not compiled with CUDA support");
-#endif
-  } else {
-    return scatter_cpu(src, index, dim, optional_out, dim_size, reduce);
-  }
+  return std::make_tuple(src, optional_out);
+  // if (src.device().is_cuda()) {
+  // #ifdef WITH_CUDA
+  //   return scatter_cuda(src, index, dim, optional_out, dim_size, reduce);
+  // #else
+  //   AT_ERROR("Not compiled with CUDA support");
+  // #endif
+  // } else {
+  //   return scatter_cpu(src, index, dim, optional_out, dim_size, reduce);
+  // }
 }
 
 static auto registry =
......
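With this change, scatter_fw short-circuits and returns (src, optional_out) unchanged; the CPU and CUDA backend includes and the device dispatch are commented out. The registration statement that follows the function is truncated above; below is a minimal sketch of how such a function is typically exposed as a TorchScript custom operator via torch/script.h. The operator name "torch_scatter::scatter_fw" is an assumption for illustration, not taken from this diff.

// Hypothetical sketch only: registering the (stubbed) forward function as a
// TorchScript custom op. The namespace/op name below is assumed; the actual
// registration is cut off in the diff above.
static auto registry =
    torch::RegisterOperators().op("torch_scatter::scatter_fw", &scatter_fw);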