// multi_tensor_scale_kernel.cu
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
// Another possibility:
// #include <torch/all.h>

#include <assert.h>
// Stringstream is a big hammer, but I want to rely on operator<< for dtype.
#include <sstream>

#include "type_shim.h"
#include "multi_tensor_apply.cuh"

#define BLOCK_SIZE 512
#define ILP 4
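// BLOCK_SIZE is the thread count passed to multi_tensor_apply as the launch
// block size; with ILP-way manual unrolling, each block covers up to
// BLOCK_SIZE*ILP elements of its chunk per loop iteration.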

template<typename in_t, typename out_t>
struct ScaleFunctor
{
  __device__ __forceinline__ void operator()(
    int chunk_size,
    volatile int* noop_gmem,
    TensorListMetadata<2>& tl,
    float scale)
  {
    __shared__ int noop_smem;

    if(threadIdx.x == 0)
      noop_smem = *noop_gmem;
    __syncthreads();
    if(noop_smem == 1)
      return;

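    // multi_tensor_apply assigns each block one (tensor, chunk) pair; look up
    // which tensor and which chunk this block owns, and that tensor's length.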
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];
    int n = tl.sizes[tensor_loc];

    in_t* in = (in_t*)tl.addresses[0][tensor_loc];
    in += chunk_idx*chunk_size;
   
    out_t* out = (out_t*)tl.addresses[1][tensor_loc];
    out += chunk_idx*chunk_size;

    n -= chunk_idx*chunk_size;
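    // After the adjustment above, n counts the elements from the start of this
    // chunk to the end of the tensor; the loop covers min(n, chunk_size) of them.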

    // Non-divergent exit condition for the __syncthreads
    float incoming_vals[ILP];
    for(int i_start = 0;
        i_start < n && i_start < chunk_size;
        i_start += blockDim.x*ILP)
    {
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        incoming_vals[ii] = 0;
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
          incoming_vals[ii] = static_cast<float>(in[i]);
      }

      // note for clarification to future michael:
      // From a pure memory dependency perspective, there's likely no point unrolling
      // the write loop, since writes just fire off once their LDGs arrive.
      // Put another way, the STGs are dependent on the LDGs, but not on each other.
      // There is still compute ILP benefit from unrolling the loop though.
      #pragma unroll
      for(int ii = 0; ii < ILP; ii++)
      {
        int i = i_start + threadIdx.x + ii*blockDim.x;
        if(i < n && i < chunk_size)
          if(isfinite(incoming_vals[ii]))
            out[i] = static_cast<out_t>(incoming_vals[ii]*scale);
          else
            *noop_gmem = 1; // Blindly fire off a write.  These will race but that's ok.
      }

      // *noop_gmem = 1 is NOT guaranteed to be seen immediately by thread 0.  I wonder if
      // we can rig block-wide and grid-wide short-circuiting with only one syncthreads.
      // It's possible we can just lean on the cache (no smem or syncs) and still be fast.
      if(threadIdx.x == 0)
        noop_smem = *noop_gmem;
      __syncthreads();
      if(noop_smem == 1)
        break;
    }
  }
};
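
// For context: multi_tensor_apply (from multi_tensor_apply.cuh) packs per-block
// (tensor, chunk) assignments into TensorListMetadata and launches a generic
// kernel that forwards its arguments to the functor.  A rough sketch of what
// that kernel might look like (an illustration only; see multi_tensor_apply.cuh
// for the real implementation):
//
//   template<int depth, typename T, typename U, typename... ArgTypes>
//   __global__ void multi_tensor_apply_kernel(
//     int chunk_size,
//     volatile int* noop_flag,
//     T tl,               // TensorListMetadata<depth>
//     U callable,         // e.g. ScaleFunctor<in_t, out_t>
//     ArgTypes... args)   // trailing arguments, here just the scale factor
//   {
//     callable(chunk_size, noop_flag, tl, args...);
//   }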

void multi_tensor_scale_cuda(
  int chunk_size,
  at::Tensor noop_flag,
  std::vector<std::vector<at::Tensor>> tensor_lists,
  float scale)
{
  using namespace at;
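
  // Expected layout: tensor_lists[0] holds the input tensors and
  // tensor_lists[1] the corresponding outputs.  noop_flag is an int tensor
  // whose first element the functor reads as an early-exit flag and sets to 1
  // if it encounters a non-finite input value.
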
  // The output (downscaled) type is dispatched separately below, based on
  // tensor_lists[1][0]'s scalar type (Half or Float).
  // If build times suffer, think about where to put this dispatch,
  // and what logic should be moved out of multi_tensor_apply.

  AT_DISPATCH_FLOATING_TYPES_AND_HALF(TypeShim(tensor_lists[0][0].type()),
     "multi_tensor_scale_cuda",
     [&]
     {
       // using accscalar_t = acc_type<scalar_t, true>;
       switch(tensor_lists[1][0].scalar_type())
       {
         case at::ScalarType::Half:
           multi_tensor_apply<2>(
             BLOCK_SIZE,
             chunk_size,
             noop_flag,
             tensor_lists,
             ScaleFunctor<scalar_t, at::Half>(),
             scale);
           break;
         case at::ScalarType::Float:
           multi_tensor_apply<2>(
             BLOCK_SIZE,
             chunk_size,
             noop_flag,
             tensor_lists,
             ScaleFunctor<scalar_t, float>(),
             scale);
           break;
         default:
           std::stringstream ss;
           ss << "multi_tensor_scale_cuda not implemented for output type = "
              << tensor_lists[1][0].dtype();
           AT_ERROR(ss.str().c_str());
       }
     });

  AT_CUDA_CHECK(cudaGetLastError());

  // AT_CUDA_CHECK(cudaDeviceSynchronize());
}
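
// How this entry point is typically exposed to Python: a small pybind11
// frontend built alongside this file.  The sketch below is an illustrative
// assumption (the module setup and docstring are hypothetical), not the
// project's actual binding code:
//
//   #include <torch/extension.h>
//   #include <vector>
//
//   void multi_tensor_scale_cuda(
//     int chunk_size,
//     at::Tensor noop_flag,
//     std::vector<std::vector<at::Tensor>> tensor_lists,
//     float scale);
//
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
//   {
//     // tensor_lists is [inputs, outputs]; the kernel becomes a no-op once
//     // noop_flag's first element is nonzero.
//     m.def("multi_tensor_scale", &multi_tensor_scale_cuda,
//           "Scale a list of tensors, setting noop_flag on non-finite values");
//   }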