// config.hpp.in — CMake-configured header selecting the device backend (HIP or CUDA)
// for the Composable Kernel library. Processed by CMake's configure_file().
#ifndef CK_CONFIG_HPP
#define CK_CONFIG_HPP

// Backend selection: CMake's configure_file() turns each #cmakedefine01 into
// "#define DEVICE_BACKEND_xxx 0/1" depending on the build configuration.
// Exactly one backend is expected to be enabled.
#cmakedefine01 DEVICE_BACKEND_HIP
#cmakedefine01 DEVICE_BACKEND_CUDA

#if DEVICE_BACKEND_HIP
#include "hip/hip_runtime.h"
#include "hip/hip_fp16.h"
// HIP/ROCm build: AMD GCN inline-assembly code paths are available.
#define CK_USE_AMD_INLINE_ASM 1

#elif DEVICE_BACKEND_CUDA
#include "cuda_runtime.h"
#include "cuda_fp16.h"
#include "nvToolsExt.h"
#include "helper_cuda.h"
// CUDA build: AMD inline assembly is not applicable.
#define CK_USE_AMD_INLINE_ASM 0
#endif

namespace ck {

#if DEVICE_BACKEND_HIP
// For some reason, HIP compiler need this definition to generate optimal load and store
// instruction
typedef float float2_t __attribute__((ext_vector_type(2)));
typedef float float4_t __attribute__((ext_vector_type(4)));
#else
Chao Liu's avatar
Chao Liu committed
28
29
30
31
32
33
// For some reason, CUDA need this definition, otherwise
//   compiler won't generate optimal load and store instruction, and
//   kernel would produce wrong result, indicating the compiler fail to generate correct
//   instruction,
using float2_t = float2;
using float4_t = float4;
Chao Liu's avatar
Chao Liu committed
34
#endif
Chao Liu's avatar
Chao Liu committed
35
36

using index_t = uint32_t;
Chao Liu's avatar
Chao Liu committed
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67

__device__ void fused_multiply_accumulate(float& d, const float& s0, const float& s1)
{
    d += s0 * s1;
}

#if 0
__device__ void fused_multiply_accumulate(half& d, const half& s0, const half& s1) { d += s0 * s1; }

__device__ void fused_multiply_accumulate(half& d, const half2& s0, const half2& s1)
{
    d += s0.x * s1.x;
    d += s0.y * s1.y;
}

__device__ void fused_multiply_accumulate(float& d, const half2& s0, const half2& s1)
{
    d += s0.x * s1.x + s0.y * s1.y;
}

__device__ void fused_multiply_accumulate(char& d, const char& s0, const char& s1) { d += s0 * s1; }

// TODO:: this interface is misleading, s0, s1 are actually int8x4
//  need to make a better interface
__device__ void fused_multiply_accumulate(int32_t& d, const int32_t& s0, const int32_t& s1)
{
#if DEVICE_BACKEND_CUDA
    d = __dp4a(s0, s1, d);
#endif
}
#endif
68
69
70
71

} // namespace ck

#endif