#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/Exceptions.h>
#include <c10/cuda/CUDAGuard.h>
#include "compat.h"

#include <assert.h>

// #include <iostream>

// This header is the one-stop shop for all your multi-tensor apply needs.

// TODO:  Kernel arg size limit may be <4KB for some other cards (ie Jetson)
constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
constexpr int depth_to_max_blocks[5] = {2560, 2560, 2560, 2560, 2560};

template<int n> struct TensorListMetadata
{
  // Base data pointer of each packed tensor, per operand list d (0 <= d < n).
  void* addresses[n][depth_to_max_tensors[n-1]];
  // numel of each packed tensor.
  int sizes[depth_to_max_tensors[n-1]];
  // Per block: which packed tensor it works on (110 max tensors fits in a byte)...
  unsigned char block_to_tensor[depth_to_max_blocks[n-1]];
  // ...and which chunk of that tensor it owns.
  int block_to_chunk[depth_to_max_blocks[n-1]]; // I fear this needs to be a full int.
  // Index (in the caller's lists) of the first tensor covered by this launch.
  int start_tensor_this_launch;
};
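
// Example of the block -> (tensor, chunk) bookkeeping above, for a hypothetical
// launch with depth = 1, chunk_size = 2048, and two tensors of numel 5000 and 300:
// tensor 0 needs ceil(5000/2048) = 3 chunks and tensor 1 needs 1, so the launch
// uses 4 blocks with
//   block_to_tensor = {0, 0, 0, 1}
//   block_to_chunk  = {0, 1, 2, 0}
// and each block recovers its slice of work from blockIdx.x inside the functor.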


template<typename T, typename U, typename... ArgTypes>
#ifdef __HIP_PLATFORM_HCC__
__launch_bounds__(1024)
#endif
__global__ void multi_tensor_apply_kernel(
    int chunk_size,
    volatile int* noop_flag,
    T tl,
    U callable,
    ArgTypes... args)
{
  // Hand the chunk information to the user-supplied functor to process however it likes.
  callable(chunk_size, noop_flag, tl, args...);
}
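
// A rough sketch of what a user-supplied "callable" can look like (illustrative
// only: ScaleSketchFunctor is not a functor defined elsewhere in this codebase).
// Each block uses block_to_tensor/block_to_chunk to find the slice of work it
// owns.  Real functors also consult noop_flag, e.g. to skip work once an inf/nan
// has been detected; this sketch simply ignores it.
template<typename scalar_t>
struct ScaleSketchFunctor
{
  __device__ void operator()(
    int chunk_size,
    volatile int* noop_flag,
    TensorListMetadata<1>& tl,
    float scale)
  {
    // Which packed tensor, and which chunk of it, does this block own?
    int tensor_loc = tl.block_to_tensor[blockIdx.x];
    int chunk_idx = tl.block_to_chunk[blockIdx.x];

    // Elements remaining in this chunk (the last chunk of a tensor may be partial).
    int n = tl.sizes[tensor_loc] - chunk_idx*chunk_size;
    if(n > chunk_size) n = chunk_size;

    scalar_t* x = (scalar_t*)tl.addresses[0][tensor_loc] + chunk_idx*chunk_size;

    // Block-stride loop: the block's threads cooperate on the chunk's elements.
    for(int i = threadIdx.x; i < n; i += blockDim.x)
      x[i] *= static_cast<scalar_t>(scale);
  }
};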

template<int depth, typename T, typename... ArgTypes>
void multi_tensor_apply(
  int block_size,
  int chunk_size,
  const at::Tensor& noop_flag,
  const std::vector<std::vector<at::Tensor>>& tensor_lists,
  T callable,
  ArgTypes... args)
{
  TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
  int len0 = tensor_lists[0].size();
  TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
  auto ref_device = tensor_lists[0][0].device();
  TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda");
  for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices
  {
    TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
    for(int t = 0; t < tensor_lists[l].size(); t++)
    {
      // TODO:  Print which tensor fails.
      bool contiguous_memory = (tensor_lists[l][t].is_sparse()) ? tensor_lists[l][t]._values().is_contiguous() : tensor_lists[l][t].is_contiguous();
#ifdef VERSION_GE_1_5
      contiguous_memory = (contiguous_memory || tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast) || tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast3d));
#endif
      TORCH_CHECK(contiguous_memory, "A tensor was not contiguous.");
      TORCH_CHECK(tensor_lists[l][t].device() == ref_device, "A tensor was not on the same device as the first tensor");
      TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
    }
  }

  int ntensors = tensor_lists[0].size();

  TensorListMetadata<depth> tl;

  const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0]));
  auto stream = at::cuda::getCurrentCUDAStream();

  tl.start_tensor_this_launch = 0;
  int loc_block_info = 0;
  int loc_tensor_info = 0;
  for(int t = 0; t < ntensors; t++)
  {
    tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel();
    // skip empty tensors
    if (tl.sizes[loc_tensor_info] == 0) {
      continue;
    }
    for(int d = 0; d < depth; d++) {
      if (tensor_lists[d][t].is_sparse()) {
        // Densify sparse tensors into a strided, zero-initialized copy before
        // recording a data pointer for the kernel.
        at::Tensor dst = at::zeros(tensor_lists[d][t].sizes(), tensor_lists[d][t].options().layout(at::kStrided));
        dst.add_(tensor_lists[d][t]);
        tl.addresses[d][loc_tensor_info] = dst.data_ptr();
      } else {
        tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr();
      }
    }
    loc_tensor_info++;

    int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1)/chunk_size;

    for(int chunk = 0; chunk < chunks_this_tensor; chunk++)
    {
      // std::cout << chunks_this_tensor << std::endl;
      tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
      tl.block_to_chunk[loc_block_info] = chunk;
      loc_block_info++;

      bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth-1] &&
                           chunk == chunks_this_tensor - 1);
      bool blocks_full = (loc_block_info == depth_to_max_blocks[depth-1]);
      bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1);
      if(tensors_full || blocks_full || last_chunk)
      {
        // using accscalar_t = acc_type<scalar_t, true>;
        multi_tensor_apply_kernel<<<loc_block_info, block_size, 0, stream>>>(
          chunk_size,
          noop_flag.DATA_PTR<int>(),
          tl,
          callable,
          args...);

        AT_CUDA_CHECK(cudaGetLastError());

        // Reset.  The control flow possibilities here make my brain hurt.
        loc_block_info = 0;
        if(chunk == chunks_this_tensor - 1)
        {
          // We just finished the last chunk of tensor t, so the next launch
          // starts fresh from tensor t + 1.
          // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << std::endl;
          loc_tensor_info = 0;
          tl.start_tensor_this_launch = t + 1;
        }
        else
        {
          // Tensor t still has chunks left over: carry its size and addresses
          // forward as entry 0 of the next launch, and resume from tensor t.
          // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << std::endl;
          tl.sizes[0] = tl.sizes[loc_tensor_info-1];
          for(int d = 0; d < depth; d++)
            tl.addresses[d][0] = tl.addresses[d][loc_tensor_info-1];
          loc_tensor_info = 1;
          tl.start_tensor_this_launch = t;
        }
      }
    }
  }
}
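
// Host-side usage sketch (illustrative only: the function name, block size, chunk
// size, and the assumption that the tensors hold float data are arbitrary choices,
// not anything mandated by this header).  The caller supplies one inner vector per
// operand, a zeroed int "noop" flag on the same device, and any trailing functor
// arguments.
inline void multi_tensor_scale_sketch(const std::vector<at::Tensor>& tensors, float scale)
{
  auto noop_flag = at::zeros({1}, tensors[0].options().dtype(at::kInt));
  std::vector<std::vector<at::Tensor>> tensor_lists{tensors};  // depth == 1

  multi_tensor_apply<1>(
    512,        // block_size: threads per block
    2048*32,    // chunk_size: elements assigned to each (tensor, chunk) pair
    noop_flag,
    tensor_lists,
    ScaleSketchFunctor<float>(),
    scale);     // trailing args are forwarded to the functor's operator()
}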