Unverified Commit 9b7c7d39 authored by Edward Z. Yang's avatar Edward Z. Yang Committed by GitHub
Browse files

Alert non-deterministic on kernels that use gpuAtomicAdd (#7582)


Signed-off-by: default avatarEdward Z. Yang <ezyang@meta.com>
parent c8cd3ff9
...@@ -426,6 +426,8 @@ void compute_grad_input( ...@@ -426,6 +426,8 @@ void compute_grad_input(
// Checks if num_kernels or columns numel larger than 2 ** 31 // Checks if num_kernels or columns numel larger than 2 ** 31
use_64bits_indexing |= num_kernels > (1 << 31); use_64bits_indexing |= num_kernels > (1 << 31);
at::globalContext().alertNotDeterministic("compute_grad_input");
if (use_64bits_indexing) { if (use_64bits_indexing) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF( AT_DISPATCH_FLOATING_TYPES_AND_HALF(
columns.scalar_type(), "compute_grad_input", ([&] { columns.scalar_type(), "compute_grad_input", ([&] {
......
...@@ -412,6 +412,8 @@ at::Tensor ps_roi_align_backward_kernel( ...@@ -412,6 +412,8 @@ at::Tensor ps_roi_align_backward_kernel(
int channels_out = channels / (pooled_height * pooled_width); int channels_out = channels / (pooled_height * pooled_width);
at::globalContext().alertNotDeterministic("ps_roi_align_backward_kernel");
auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF( AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { grad.scalar_type(), "ps_roi_align_backward_kernel", [&] {
......
...@@ -251,6 +251,8 @@ at::Tensor ps_roi_pool_backward_kernel( ...@@ -251,6 +251,8 @@ at::Tensor ps_roi_pool_backward_kernel(
int channels_out = channels / (pooled_height * pooled_width); int channels_out = channels / (pooled_height * pooled_width);
at::globalContext().alertNotDeterministic("ps_roi_pool_backward_kernel");
auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF( AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "ps_roi_pool_backward_kernel", [&] { grad.scalar_type(), "ps_roi_pool_backward_kernel", [&] {
......
...@@ -421,6 +421,8 @@ at::Tensor roi_align_backward_kernel( ...@@ -421,6 +421,8 @@ at::Tensor roi_align_backward_kernel(
int h_stride = grad.stride(2); int h_stride = grad.stride(2);
int w_stride = grad.stride(3); int w_stride = grad.stride(3);
at::globalContext().alertNotDeterministic("roi_align_backward_kernel");
auto rois_ = rois.contiguous(); auto rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF( AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_align_backward_kernel", [&] { grad.scalar_type(), "roi_align_backward_kernel", [&] {
......
...@@ -232,6 +232,8 @@ at::Tensor roi_pool_backward_kernel( ...@@ -232,6 +232,8 @@ at::Tensor roi_pool_backward_kernel(
int h_stride = grad.stride(2); int h_stride = grad.stride(2);
int w_stride = grad.stride(3); int w_stride = grad.stride(3);
at::globalContext().alertNotDeterministic("roi_pool_backward_kernel");
auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous(); auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous();
AT_DISPATCH_FLOATING_TYPES_AND_HALF( AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad.scalar_type(), "roi_pool_backward_kernel", [&] { grad.scalar_type(), "roi_pool_backward_kernel", [&] {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment