// rt_mod_cuda.cc
#include "codegen_cuda.h"
#include "runtime/cuda/cuda_module.h"
#include "runtime/pack_args.h"
#include <tvm/ffi/reflection/registry.h>

namespace tvm {
namespace codegen {

static std::unordered_map<std::string, runtime::FunctionInfo>
ExtractFuncInfo(const IRModule &mod) {
11
12
13
  std::unordered_map<std::string, runtime::FunctionInfo> fmap;

  for (auto kv : mod->functions) {
14
15
    ICHECK(kv.second->IsInstance<tir::PrimFuncNode>())
        << "Can only lower IR Module with PrimFuncs";
16
17
18
19
20
21
22
    auto f = Downcast<tir::PrimFunc>(kv.second);

    runtime::FunctionInfo info;
    for (size_t i = 0; i < f->params.size(); ++i) {
      if (f->params[i]->dtype.is_handle()) {
        auto ptr = f->params[i]->type_annotation.as<PointerTypeNode>();
        if (ptr && ptr->storage_scope == "grid_constant") {
23
          info.arg_types.push_back(DataType(runtime::kDLGridConstant, 64, 1));
24
25
26
27
28
29
          continue;
        }
      }
      info.arg_types.push_back(f->params[i].dtype());
    }
    if (auto opt = f->GetAttr<Array<String>>(tir::attr::kKernelLaunchParams)) {
30
      for (const auto &tag : opt.value()) {
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
        info.launch_param_tags.push_back(tag);
      }
    }
    auto global_symbol = f->GetAttr<String>(tvm::attr::kGlobalSymbol);
    fmap[static_cast<std::string>(global_symbol.value())] = info;
  }
  return fmap;
}

/*!
 * \brief Build a CUDA runtime module: generate CUDA source for every device
 *        kernel in the module, then compile it via a registered callback.
 *
 * Global FFI callbacks consulted:
 *   - "tilelang_callback_cuda_postproc" (optional): post-processes the
 *     generated CUDA source.
 *   - "tilelang_callback_cuda_compile" (required): compiles the source and
 *     returns PTX text or a cubin blob.
 *
 * \param mod IRModule containing only device-kernel-launch PrimFuncs.
 * \param target The CUDA target to build for.
 * \return A CUDAModule wrapping the compiled artifact plus generated source.
 */
runtime::Module BuildTileLangCUDA(IRModule mod, Target target) {
  bool output_ssa = false;
  CodeGenTileLangCUDA cg;
  cg.Init(output_ssa);

  for (auto kv : mod->functions) {
    ICHECK(kv.second->IsInstance<PrimFuncNode>())
        << "CodeGenTileLangCUDA: Can only take PrimFunc";
    auto gvar = Downcast<GlobalVar>(kv.first);
    auto f = Downcast<PrimFunc>(kv.second);
    auto calling_conv = f->GetAttr<Integer>(tvm::attr::kCallingConv);
    ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch)
        << "CodeGenTileLangCUDA: expect calling_conv equals "
           "CallingConv::kDeviceKernelLaunch";
    cg.AddFunction(gvar, f);
  }

  std::string code = cg.Finish();
  if (const auto f =
          ffi::Function::GetGlobal("tilelang_callback_cuda_postproc")) {
    code = (*f)(code, target).cast<std::string>();
  }
  std::string fmt = "ptx";
  std::string ptx;
  if (const auto f =
          ffi::Function::GetGlobal("tilelang_callback_cuda_compile")) {
    ptx = (*f)(code, target).cast<std::string>();
    // PTX text emitted by nvcc/nvrtc begins with a "//" comment header; any
    // other leading byte means the callback returned an assembled cubin.
    // Guard against an empty result before reading the first byte.
    if (ptx.empty() || ptx[0] != '/')
      fmt = "cubin";
  } else {
    ICHECK(0) << "tilelang_callback_cuda_compile is not registered";
  }
  return runtime::CUDAModuleCreate(ptx, fmt, ExtractFuncInfo(mod), code);
}

/*!
 * \brief Build a CUDA runtime module from generated source only, skipping
 *        the compile step (the binary slot holds the placeholder "ptx").
 *
 * The optional global callback "tilelang_callback_cuda_postproc" is applied
 * to the generated source when registered.
 *
 * \param mod IRModule containing only device-kernel-launch PrimFuncs.
 * \param target The CUDA target (forwarded to the postproc callback).
 * \return A CUDAModule carrying the uncompiled CUDA source.
 */
runtime::Module BuildTileLangCUDAWithoutCompile(IRModule mod, Target target) {
  bool output_ssa = false;
  CodeGenTileLangCUDA cg;
  cg.Init(output_ssa);

  for (auto kv : mod->functions) {
    ICHECK(kv.second->IsInstance<PrimFuncNode>())
        << "CodeGenTileLangCUDA: Can only take PrimFunc";
    auto gvar = Downcast<GlobalVar>(kv.first);
    auto f = Downcast<PrimFunc>(kv.second);
    auto calling_conv = f->GetAttr<Integer>(tvm::attr::kCallingConv);
    ICHECK(calling_conv == CallingConv::kDeviceKernelLaunch)
        << "CodeGenTileLangCUDA: expect calling_conv equals "
           "CallingConv::kDeviceKernelLaunch";
    cg.AddFunction(gvar, f);
  }

  std::string code = cg.Finish();
  if (const auto f =
          ffi::Function::GetGlobal("tilelang_callback_cuda_postproc")) {
    code = (*f)(code, target).cast<std::string>();
  }
  // No compilation: placeholder data/format, real source in `code`.
  return runtime::CUDAModuleCreate("ptx", "ptx", ExtractFuncInfo(mod), code);
}

// Register both build entry points in the global FFI registry at static
// initialization time so they are reachable by name (e.g. from Python) as
// "target.build.tilelang_cuda" and
// "target.build.tilelang_cuda_without_compile".
TVM_FFI_STATIC_INIT_BLOCK({
  namespace refl = tvm::ffi::reflection;
  refl::GlobalDef()
      .def("target.build.tilelang_cuda", BuildTileLangCUDA)
      .def("target.build.tilelang_cuda_without_compile",
           BuildTileLangCUDAWithoutCompile);
});
} // namespace codegen
} // namespace tvm