// config.nvidia.hpp.in
// Configure-time template for the NVIDIA (CUDA) backend: disables all
// AMD-specific code paths and selects the index type.
#ifndef CK_CONFIG_NVIDIA_HPP
#define CK_CONFIG_NVIDIA_HPP

#include <cstdint>

#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <nvToolsExt.h>

// index type: unsigned or signed
#define CK_UNSIGNED_INDEX_TYPE 0

// device backend
#define CK_DEVICE_BACKEND_NVIDIA 1

// disable AMD inline asm and intrinsic
#define CK_USE_AMD_INLINE_ASM 0
#define CK_THREADWISE_GEMM_USE_AMD_INLINE_ASM 0
#define CK_USE_AMD_BUFFER_ADDRESSING 0
#define CK_USE_AMD_BUFFER_ADDRESSING_INTRINSIC 0
#define CK_USE_AMD_XDLOPS 0
#define CK_USE_AMD_XDLOPS_INLINE_ASM 0
#define CK_USE_AMD_XDLOPS_EMULATE 0

// experimental implementation
#define CK_EXPERIMENTAL_BLOCKWISE_GEMM_USE_PIPELINE 0
#define CK_EXPERIMENTAL_TENSOR_COORDINATE_USE_CALCULATE_OFFSET_DIFF 0
#define CK_EXPERIMENTAL_THREADWISE_COPY_V4R2_USE_OPTIMIZED_ADDRESS_CACLULATION 0
#define CK_EXPERIMENTAL_USE_MORE_COMPILE_STATIC_BLOCKWISE_GENERIC_SLICE_COPY_V1 0
#define CK_EXPERIMENTAL_USE_MORE_COMPILE_STATIC_THREADWISE_GENERIC_TENSOR_SLICE_COPY_V1R2 0
#define CK_EXPERIMENTAL_USE_MORE_COMPILE_STATIC_THREADWISE_GENERIC_TENSOR_SLICE_COPY_V2R1 0

namespace ck {

enum AddressSpace
{
Chao Liu's avatar
Chao Liu committed
35
36
37
38
    Generic,
    Global,
    Lds,
    Vgpr
Chao Liu's avatar
Chao Liu committed
39
40
41
42
};

enum InMemoryDataOperation
{
Chao Liu's avatar
Chao Liu committed
43
44
    Set,
    AtomicAdd
45
46
47
48
49
50
51
52
53
54
};

#if CK_UNSIGNED_INDEX_TYPE
using index_t = uint32_t;
#else
using index_t = int32_t;
#endif

} // namespace ck
#endif