Commit 8fa76786 authored by Jeremy Reizenstein, committed by Facebook GitHub Bot

fix CPU-only hiding of cuda calls

Summary: CPU-only builds should be fixed by this change, which moves the calls to the CUDA implementations inside the #ifdef WITH_CUDA guards.

Reviewed By: nikhilaravi

Differential Revision: D20598014

fbshipit-source-id: df098ec4c6c93d38515172805fe57cac7463c506
parent 595aca27
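The underlying problem: each dispatch function guarded its CHECK_CONTIGUOUS_CUDA calls with #ifdef WITH_CUDA, but the return statement that calls the CUDA implementation sat after the #endif, so a CPU-only build still tried to compile a call to a function that is only available in GPU builds. Below is a minimal sketch of the corrected guard pattern, using a hypothetical operator myOp (myOpForward, myOpCudaForward, and myOpCpuForward are illustrative names, not identifiers from this commit); the checking macros mirror the ones used in the hunks below.

#include <torch/extension.h>

#define CHECK_CUDA(x) TORCH_CHECK(x.is_cuda(), #x " must be a CUDA tensor")
#define CHECK_CONTIGUOUS(x) \
  TORCH_CHECK(x.is_contiguous(), #x " must be contiguous")
#define CHECK_CONTIGUOUS_CUDA(x) \
  CHECK_CUDA(x);                 \
  CHECK_CONTIGUOUS(x)

#ifdef WITH_CUDA
// Declared here, defined in a .cu file that only exists in GPU builds.
torch::Tensor myOpCudaForward(const torch::Tensor& input);
#endif

torch::Tensor myOpCpuForward(const torch::Tensor& input);

torch::Tensor myOpForward(const torch::Tensor& input) {
  if (input.is_cuda()) {
#ifdef WITH_CUDA
    CHECK_CONTIGUOUS_CUDA(input);
    // The CUDA call stays inside the #ifdef branch, so a CPU-only build
    // never sees a reference to myOpCudaForward.
    return myOpCudaForward(input);
#else
    AT_ERROR("Not compiled with GPU support");
#endif
  } else {
    CHECK_CONTIGUOUS(input);
    return myOpCpuForward(input);
  }
}

With the return moved above the #else, the preprocessor strips the CUDA call entirely when WITH_CUDA is undefined, which is what the hunks below do for the alphaComposite, weightedSumNorm, and weightedSum forward and backward dispatchers.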
@@ -63,10 +63,10 @@ torch::Tensor alphaCompositeForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return alphaCompositeCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return alphaCompositeCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -92,12 +92,12 @@ std::tuple<torch::Tensor, torch::Tensor> alphaCompositeBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
     return alphaCompositeCudaBackward(
         grad_outputs, features, alphas, points_idx);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
......
@@ -61,11 +61,11 @@ torch::Tensor weightedSumNormForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumNormCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return weightedSumNormCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -91,12 +91,12 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumNormBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
-#else
-    AT_ERROR("Not compiled with GPU support");
-#endif
     return weightedSumNormCudaBackward(
         grad_outputs, features, alphas, points_idx);
+#else
+    AT_ERROR("Not compiled with GPU support");
+#endif
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
......
@@ -61,11 +61,10 @@ torch::Tensor weightedSumForward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumCudaForward(features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return weightedSumCudaForward(features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(features);
     CHECK_CONTIGUOUS(alphas);
@@ -91,11 +90,11 @@ std::tuple<torch::Tensor, torch::Tensor> weightedSumBackward(
     CHECK_CONTIGUOUS_CUDA(features);
     CHECK_CONTIGUOUS_CUDA(alphas);
     CHECK_CONTIGUOUS_CUDA(points_idx);
+    return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx);
 #else
     AT_ERROR("Not compiled with GPU support");
 #endif
-    return weightedSumCudaBackward(grad_outputs, features, alphas, points_idx);
   } else {
     CHECK_CONTIGUOUS(grad_outputs);
     CHECK_CONTIGUOUS(features);
......