Unverified Commit f3f4bf16 authored by thatPepe's avatar thatPepe Committed by GitHub
Browse files

Merge pull request #1055 from InfiniTensor/issue/1033b

issue/1033 - further fix nv lua for backward compatibility
parents e7a1b121 fc2500e5
......@@ -235,14 +235,14 @@ option_end()
-- Flash-Attn
option("flash-attn")
set_default(nil)
set_default("")
set_showmenu(true)
set_description("Path to flash-attention repo. If not set, flash-attention will not used.")
option_end()
if has_config("aten") then
add_defines("ENABLE_ATEN")
if get_config("flash-attn") ~= nil then
if get_config("flash-attn") ~= false then
add_defines("ENABLE_FLASH_ATTN")
end
end
......@@ -462,7 +462,7 @@ target("infinicore_cpp_api")
add_linkdirs(INFINI_ROOT.."/lib")
add_links("infiniop", "infinirt", "infiniccl")
if get_config("flash-attn") == true then
if get_config("flash-attn") ~= "" then
add_installfiles("(builddir)/$(plat)/$(arch)/$(mode)/flash-attn*.so", {prefixdir = "lib"})
if has_config("nv-gpu") then
add_deps("flash-attn-nvidia")
......
......@@ -145,7 +145,7 @@ target("flash-attn-nvidia")
add_links("cudart")
add_cugencodes("native")
if FLASH_ATTN_ROOT and FLASH_ATTN_ROOT ~= false and FLASH_ATTN_ROOT ~= "" then
if FLASH_ATTN_ROOT and FLASH_ATTN_ROOT ~= "" then
before_build(function (target)
local TORCH_DIR = os.iorunv("python", {"-c", "import torch, os; print(os.path.dirname(torch.__file__))"}):trim()
local PYTHON_INCLUDE = os.iorunv("python", {"-c", "import sysconfig; print(sysconfig.get_paths()['include'])"}):trim()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment