// common.h — shared THC/CUDA helper macros and utilities.
#ifndef THC_COMMON_INC
#define THC_COMMON_INC

// Expands to the type-specialized THC tensor function name for the current
// generated real type, via the CReal token pasted in by the THCGenerate*
// machinery (see the THCGenerateAllTypes.h include at the bottom of this file).
#define THCTensor_(NAME) TH_CONCAT_4(TH,CReal,Tensor_,NAME)

// Grid-stride loop: I walks every index in [0, N) across the whole grid,
// so correctness does not depend on the launch configuration.
// N is parenthesized so expression arguments (e.g. `a ? x : y`) parse
// correctly, and both products are widened to ptrdiff_t BEFORE the
// multiply so they cannot overflow 32-bit unsigned arithmetic on very
// large grids.
#define KERNEL_LOOP(I, N) \
  for (ptrdiff_t I = (ptrdiff_t)blockIdx.x * blockDim.x + threadIdx.x; \
       I < (N); I += (ptrdiff_t)blockDim.x * gridDim.x)

// Asserts that all tensor arguments reside on the same GPU; trips
// THAssertMsg with a descriptive message otherwise. Delegates the actual
// check to the per-type THCTensor_(checkGPU) helper.
#define THC_assertSameGPU(...) THAssertMsg(THCTensor_(checkGPU)(__VA_ARGS__), \
  "Some of the input tensors are located on different GPUs. Please move them to a single one.")

// Maximum tensor rank supported by TensorInfo below.
const int MAX_DIMS = 25;
// Threads per block used by every kernel launched through KERNEL_RUN.
const int NUM_THREADS = 1024;

// Returns the number of blocks of NUM_THREADS threads needed to cover N
// elements (ceiling division). Written as div + remainder check instead of
// the usual (N + NUM_THREADS - 1) / NUM_THREADS so the intermediate sum
// cannot overflow a signed int when N is close to INT_MAX.
inline int GET_BLOCKS(const int N) {
  return N / NUM_THREADS + (N % NUM_THREADS != 0 ? 1 : 0);
}

// Launches NAME<real> over N elements on the current THC stream and checks
// for launch errors. N is appended as the kernel's last argument.
// Wrapped in do { } while (0) so the macro behaves as a single statement
// (safe inside an unbraced `if`) and so `grid`/`stream` stay scoped to the
// macro, allowing multiple KERNEL_RUN uses in one function.
// Expects `state` (THCState*) and the generated `real` type in scope.
#define KERNEL_RUN(NAME, N, ...) \
  do { \
    const int grid = GET_BLOCKS(N); \
    cudaStream_t stream = THCState_getCurrentStream(state); \
    NAME<real><<<grid, NUM_THREADS, 0, stream>>>(__VA_ARGS__, N); \
    THCudaCheck(cudaGetLastError()); \
  } while (0)

// Flat description of a tensor's memory layout, suitable for passing to a
// kernel by value: data pointer plus per-dimension sizes and strides.
// Fixed-size arrays (MAX_DIMS) keep the struct trivially copyable.
template<typename T>
struct TensorInfo {
  T *data;               // pointer to the first element (device memory -- TODO confirm at fill site)
  int dims;              // number of valid entries in size[]/stride[]
  int size[MAX_DIMS];    // extent of each dimension; entries past dims are unused
  int stride[MAX_DIMS];  // per-dimension stride -- presumably in elements, verify against the code that fills it
};

#include "generic/common.h"
#include "THC/THCGenerateAllTypes.h"

#endif  // THC_COMMON_INC