#ifndef CK_CONFIG_NVIDIA_HPP
#define CK_CONFIG_NVIDIA_HPP
#cmakedefine01 CK_DEVICE_BACKEND_NVIDIA

#include "cuda_runtime.h"
#include "cuda_fp16.h"
#include "nvToolsExt.h"
#include "helper_cuda.h"
#define CK_USE_AMD_INLINE_ASM 0

namespace ck {

// Vector-type aliases for CUDA's built-in float2/float4.
// NOTE(original author): without these aliases the CUDA compiler fails to
// generate the optimal (vectorized) load/store instructions, and kernels
// produce wrong results — i.e. the compiler emits incorrect code otherwise.
using float2_t = float2;
using float4_t = float4;

using index_t = uint32_t;

// Scalar multiply-accumulate: d <- d + s0 * s1.
__device__ void fused_multiply_accumulate(float& d, const float& s0, const float& s1)
{
    const float product = s0 * s1;
    d = d + product;
}

// Disabled alternative overloads (half, half2, char, packed-int8) of
// fused_multiply_accumulate. Kept under "#if 0" pending validation.
#if 0
__device__ void fused_multiply_accumulate(half& d, const half& s0, const half& s1) { d += s0 * s1; }

__device__ void fused_multiply_accumulate(half& d, const half2& s0, const half2& s1)
{
    d += s0.x * s1.x;
    d += s0.y * s1.y;
}

__device__ void fused_multiply_accumulate(float& d, const half2& s0, const half2& s1)
{
    d += s0.x * s1.x + s0.y * s1.y;
}

__device__ void fused_multiply_accumulate(char& d, const char& s0, const char& s1) { d += s0 * s1; }

// TODO: this interface is misleading — s0 and s1 are actually int8x4 values
//   packed into int32; a better-typed interface is needed.
__device__ void fused_multiply_accumulate(int32_t& d, const int32_t& s0, const int32_t& s1)
{
#if CK_DEVICE_BACKEND_NVIDIA
    // 4-way int8 dot product accumulated into int32.
    // NOTE(review): __dp4a requires compute capability 6.1+ — confirm targets.
    d = __dp4a(s0, s1, d);
#endif
}
#endif

} // namespace ck

#endif