// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 *  Copyright (c) 2023 by Contributors
 *  Copyright (c) 2023, GT-TDAlab (Muhammed Fatih Balin & Umit V. Catalyurek)
 * @file cuda/index_select_impl.cu
 * @brief Index select operator implementation on CUDA.
 */
#include <c10/core/ScalarType.h>
#include <graphbolt/cuda_ops.h>

#include <numeric>

#include "common.h"
#include "max_uva_threads.h"
#include "utils.h"

namespace graphbolt {
namespace ops {

/** @brief Index select operator implementation for feature size 1. */
template <typename DType, typename IdType>
__global__ void IndexSelectSingleKernel(
    const DType* input, const int64_t input_len, const IdType* index,
    const int64_t output_len, DType* output,
    const int64_t* permutation = nullptr) {
  int64_t out_row_index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
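  // Grid-stride loop: each thread copies one scalar per output row, then
  // advances by the total number of launched threads.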
  while (out_row_index < output_len) {
    assert(index[out_row_index] >= 0 && index[out_row_index] < input_len);
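    // The index array is sorted by the caller; permutation maps each sorted
    // position back to its original output row so the result keeps the
    // caller's ordering.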
    const auto out_row =
        permutation ? permutation[out_row_index] : out_row_index;
    output[out_row] = input[index[out_row_index]];
    out_row_index += stride;
  }
}

/**
 * @brief Index select operator implementation for feature size > 1.
 */
template <typename DType, typename IdType>
__global__ void IndexSelectMultiKernel(
    const DType* const input, const int64_t input_len,
    const int64_t feature_size, const IdType* const index,
    const int64_t output_len, DType* const output,
    const int64_t* permutation = nullptr) {
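  // Each block is a 2D tile of threads: threadIdx.y selects an output row and
  // threadIdx.x strides across that row's feature columns.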
  int64_t out_row_index = blockIdx.x * blockDim.y + threadIdx.y;

  const int64_t stride = blockDim.y * gridDim.x;

  while (out_row_index < output_len) {
    int64_t column = threadIdx.x;
    const int64_t in_row = index[out_row_index];
    assert(in_row >= 0 && in_row < input_len);
    const auto out_row =
        permutation ? permutation[out_row_index] : out_row_index;
    while (column < feature_size) {
      output[out_row * feature_size + column] =
          input[in_row * feature_size + column];
      column += blockDim.x;
    }
    out_row_index += stride;
  }
}

/**
 * @brief Index select operator implementation for feature size > 1.
 *
 * @note This is a cross-device access version of IndexSelectMultiKernel. Since
 * the memory access over PCIe is more sensitive to the data access alignment
 * (cache line), we need a separate version here.
 */
template <typename DType, typename IdType>
__global__ void IndexSelectMultiKernelAligned(
    const DType* const input, const int64_t input_len,
    const int64_t feature_size, const IdType* const index,
    const int64_t output_len, DType* const output,
    const int64_t* permutation = nullptr) {
  int64_t out_row_index = blockIdx.x * blockDim.y + threadIdx.y;

  const int64_t stride = blockDim.y * gridDim.x;

  while (out_row_index < output_len) {
    int64_t col = threadIdx.x;
    const int64_t in_row = index[out_row_index];
    assert(in_row >= 0 && in_row < input_len);
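    // Offset (in elements) of this input row's start from the previous GPU
    // cache line boundary; shifting the starting column back by this amount
    // aligns the loads to cache lines, and threads whose column is negative
    // simply skip the write on their first iteration.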
    const int64_t idx_offset =
        ((uint64_t)(&input[in_row * feature_size]) % GPU_CACHE_LINE_SIZE) /
        sizeof(DType);
    col = col - idx_offset;
    const auto out_row =
        permutation ? permutation[out_row_index] : out_row_index;
    while (col < feature_size) {
      if (col >= 0)
        output[out_row * feature_size + col] =
            input[in_row * feature_size + col];
      col += blockDim.x;
    }
    out_row_index += stride;
  }
}

template <typename DType, typename IdType>
torch::Tensor UVAIndexSelectImpl_(torch::Tensor input, torch::Tensor index) {
  const int64_t input_len = input.size(0);
  const int64_t return_len = index.size(0);
  const int64_t original_feature_size = std::accumulate(
      input.sizes().begin() + 1, input.sizes().end(), 1ll, std::multiplies<>());
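  // Re-express the feature size in units of DType, the (possibly wider) copy
  // element type selected by the dispatcher in UVAIndexSelectImpl.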
  const auto aligned_feature_size =
      input.element_size() * original_feature_size / sizeof(DType);
  torch::Tensor ret = torch::empty(
      {return_len, original_feature_size}, torch::TensorOptions()
                                               .dtype(input.dtype())
                                               .device(c10::DeviceType::CUDA));
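  // The output keeps the tensor's original dtype; both pointers are
  // reinterpreted as DType so the kernels can copy rows in wider aligned
  // chunks.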
  DType* input_ptr = reinterpret_cast<DType*>(input.data_ptr());
  DType* ret_ptr = reinterpret_cast<DType*>(ret.data_ptr());

  // Sort the index to improve the memory access pattern.
  torch::Tensor sorted_index, permutation;
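  // Only the significant bits of the indices (at most NumberOfBits(input_len))
  // need to be considered when sorting.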
  std::tie(sorted_index, permutation) =
      Sort(index, cuda::NumberOfBits(input_len));
  const IdType* index_sorted_ptr = sorted_index.data_ptr<IdType>();
  const int64_t* permutation_ptr = permutation.data_ptr<int64_t>();

  if (aligned_feature_size == 1) {
    // Use a single thread to process each output row to avoid wasting threads.
    const int num_threads = cuda::FindNumThreads(return_len);
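    // Cap the total number of launched threads at cuda::max_uva_threads,
    // defaulting to 2^20 when it is unset.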
    const int num_blocks =
        (::min(return_len, cuda::max_uva_threads.value_or(1 << 20)) +
         num_threads - 1) /
        num_threads;
    CUDA_KERNEL_CALL(
        IndexSelectSingleKernel, num_blocks, num_threads, 0, input_ptr,
        input_len, index_sorted_ptr, return_len, ret_ptr, permutation_ptr);
  } else {
    constexpr int BLOCK_SIZE = CUDA_MAX_NUM_THREADS;
    dim3 block(BLOCK_SIZE, 1);
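    // Shrink block.x toward the feature size (growing block.y in step) so that
    // threads are not wasted along the column dimension and each block covers
    // several output rows.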
    while (static_cast<int64_t>(block.x) >= 2 * aligned_feature_size) {
      block.x >>= 1;
      block.y <<= 1;
    }
    const dim3 grid(::min(
        (return_len + block.y - 1) / block.y,
        cuda::max_uva_threads.value_or(1 << 20) / BLOCK_SIZE));
    if (aligned_feature_size * sizeof(DType) <= GPU_CACHE_LINE_SIZE) {
      // When feature size is smaller than GPU cache line size, use unaligned
      // version for less SM usage, which is more resource efficient.
      CUDA_KERNEL_CALL(
          IndexSelectMultiKernel, grid, block, 0, input_ptr, input_len,
          aligned_feature_size, index_sorted_ptr, return_len, ret_ptr,
          permutation_ptr);
    } else {
      // Use aligned version to improve the memory access pattern.
      CUDA_KERNEL_CALL(
          IndexSelectMultiKernelAligned, grid, block, 0, input_ptr, input_len,
          aligned_feature_size, index_sorted_ptr, return_len, ret_ptr,
          permutation_ptr);
    }
  }

  auto return_shape = std::vector<int64_t>({return_len});
  return_shape.insert(
      return_shape.end(), input.sizes().begin() + 1, input.sizes().end());
  ret = ret.reshape(return_shape);
  return ret;
}

/**
 * @brief UVA index select operator implementation on CUDA.
 *
 * All basic torch types are supported for input.
 * The supported index types are int and int64_t.
 */
torch::Tensor UVAIndexSelectImpl(torch::Tensor input, torch::Tensor index) {
  return AT_DISPATCH_INDEX_TYPES(
      index.scalar_type(), "UVAIndexSelectImpl", ([&] {
        const auto ptr = (size_t)input.data_ptr();
        const int64_t feature_size = std::accumulate(
            input.sizes().begin() + 1, input.sizes().end(), 1ll,
            std::multiplies<>());
        // We perform the copy with datatype of size powers of 2, and the
        // maximum data type we use has 16 bytes. We check the alignment of the
        // pointer and the feature dimensionality to determine the largest
        // type to use for the copy to minimize the number of CUDA threads used.
        // Alignment denotes the maximum suitable alignment and datatype size
        // for the copies.
        const int aligned_access_size =
            std::gcd(16, std::gcd(ptr, input.element_size() * feature_size));
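        // For example, a float tensor with 4 features per row and a 16-byte
        // aligned data pointer gives aligned_access_size == 16, so each row is
        // copied as a single 16-byte element.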
        return GRAPHBOLT_DISPATCH_ELEMENT_SIZES(
            aligned_access_size, "UVAIndexSelectImplElementSize", ([&] {
              return UVAIndexSelectImpl_<element_size_t, index_t>(input, index);
            }));
      }));
}

}  //  namespace ops
}  //  namespace graphbolt