// rt_mod_cuda.cc — builds CUDA runtime modules from TileLang IRModules.
#include "codegen_cuda.h"

#include "runtime/cuda/cuda_module.h"
#include "runtime/pack_args.h"
#include <tvm/ffi/reflection/registry.h>

namespace tvm {
namespace codegen {

9
10
static std::unordered_map<std::string, runtime::FunctionInfo>
ExtractFuncInfo(const IRModule &mod) {
11
12
13
  std::unordered_map<std::string, runtime::FunctionInfo> fmap;

  for (auto kv : mod->functions) {
14
15
    ICHECK(kv.second->IsInstance<tir::PrimFuncNode>())
        << "Can only lower IR Module with PrimFuncs";
16
17
18
19
20
21
22
    auto f = Downcast<tir::PrimFunc>(kv.second);

    runtime::FunctionInfo info;
    for (size_t i = 0; i < f->params.size(); ++i) {
      if (f->params[i]->dtype.is_handle()) {
        auto ptr = f->params[i]->type_annotation.as<PointerTypeNode>();
        if (ptr && ptr->storage_scope == "grid_constant") {
23
          info.arg_types.push_back(DataType(runtime::kDLGridConstant, 64, 1));
24
25
26
27
28
          continue;
        }
      }
      info.arg_types.push_back(f->params[i].dtype());
    }
29
30
    if (auto opt = f->GetAttr<ffi::Array<ffi::String>>(
            tir::attr::kKernelLaunchParams)) {
31
      for (const auto &tag : opt.value()) {
32
33
34
        info.launch_param_tags.push_back(tag);
      }
    }
35
    auto global_symbol = f->GetAttr<ffi::String>(tvm::attr::kGlobalSymbol);
36
37
38
39
40
    fmap[static_cast<std::string>(global_symbol.value())] = info;
  }
  return fmap;
}

41
ffi::Module BuildTileLangCUDA(IRModule mod, Target target) {
42
43
44
45
46
  bool output_ssa = false;
  CodeGenTileLangCUDA cg;
  cg.Init(output_ssa);

  for (auto kv : mod->functions) {
47
48
    ICHECK(kv.second->IsInstance<PrimFuncNode>())
        << "CodeGenTileLangCUDA: Can only take PrimFunc";
49
    auto gvar = Downcast<GlobalVar>(kv.first);
50
51
52
    auto f = Downcast<PrimFunc>(kv.second);
    auto calling_conv = f->GetAttr<Integer>(tvm::attr::kCallingConv);
    ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch);
53
    cg.AddFunction(gvar, f);
54
55
56
  }

  std::string code = cg.Finish();
57
58
59
  if (const auto f =
          ffi::Function::GetGlobal("tilelang_callback_cuda_postproc")) {
    code = (*f)(code, target).cast<std::string>();
60
61
62
  }
  std::string fmt = "ptx";
  std::string ptx;
63
64
65
  if (const auto f =
          ffi::Function::GetGlobal("tilelang_callback_cuda_compile")) {
    ptx = (*f)(code, target).cast<std::string>();
66
67
    if (ptx[0] != '/')
      fmt = "cubin";
68
69
70
71
72
73
  } else {
    ICHECK(0);
  }
  return runtime::CUDAModuleCreate(ptx, fmt, ExtractFuncInfo(mod), code);
}

74
ffi::Module BuildTileLangCUDAWithoutCompile(IRModule mod, Target target) {
75
76
77
78
79
  bool output_ssa = false;
  CodeGenTileLangCUDA cg;
  cg.Init(output_ssa);

  for (auto kv : mod->functions) {
80
81
    ICHECK(kv.second->IsInstance<PrimFuncNode>())
        << "CodeGenTileLangCUDA: Can only take PrimFunc";
82
    auto gvar = Downcast<GlobalVar>(kv.first);
83
84
85
    auto f = Downcast<PrimFunc>(kv.second);
    auto calling_conv = f->GetAttr<Integer>(tvm::attr::kCallingConv);
    ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch);
86
    cg.AddFunction(gvar, f);
87
88
89
  }

  std::string code = cg.Finish();
90
91
92
  if (const auto f =
          ffi::Function::GetGlobal("tilelang_callback_cuda_postproc")) {
    code = (*f)(code, target).cast<std::string>();
93
  }
94
  return runtime::CUDAModuleCreate("ptx", "ptx", ExtractFuncInfo(mod), code);
95
96
}

97
TVM_FFI_STATIC_INIT_BLOCK() {
98
99
100
101
102
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef()
      .def("target.build.tilelang_cuda", BuildTileLangCUDA)
      .def("target.build.tilelang_cuda_without_compile",
           BuildTileLangCUDAWithoutCompile);
103
}
104

105
106
} // namespace codegen
} // namespace tvm