OpenDAS / ollama · Commits · cc5a71e0

Unverified commit cc5a71e0, authored Apr 23, 2024 by Daniel Hiltgen, committed by GitHub on Apr 23, 2024

Merge pull request #3709 from remy415/custom-gpu-defs

Adds support for customizing GPU build flags in llama.cpp

Parents: e83bcf7f, 9c0db4cc
Changes: 2 changed files, with 25 additions and 2 deletions (+25 −2)

  llm/generate/gen_linux.sh      +15 −1
  llm/generate/gen_windows.ps1   +10 −1
llm/generate/gen_linux.sh (view file @ cc5a71e0)
@@ -172,7 +172,15 @@ if [ -d "${CUDA_LIB_DIR}" ]; then
         # Disabling has minimal performance effect while maintaining compatibility.
         ARM64_DEFS="-DLLAMA_AVX=off -DLLAMA_AVX2=off -DLLAMA_AVX512=off -DLLAMA_CUDA_F16=off"
     fi
-    CMAKE_DEFS="-DLLAMA_CUDA=on -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS}"
+    # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
+    if [ -n "${OLLAMA_CUSTOM_CUDA_DEFS}" ]; then
+        echo "OLLAMA_CUSTOM_CUDA_DEFS=\"${OLLAMA_CUSTOM_CUDA_DEFS}\""
+        CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES} ${OLLAMA_CUSTOM_CUDA_DEFS}"
+        echo "Building custom CUDA GPU"
+    else
+        CMAKE_CUDA_DEFS="-DLLAMA_CUDA=on -DLLAMA_CUDA_FORCE_MMQ=on -DCMAKE_CUDA_ARCHITECTURES=${CMAKE_CUDA_ARCHITECTURES}"
+    fi
+    CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} ${ARM64_DEFS} ${CMAKE_CUDA_DEFS}"
     BUILD_DIR="../build/linux/${ARCH}/cuda${CUDA_VARIANT}"
     EXTRA_LIBS="-L${CUDA_LIB_DIR} -lcudart -lcublas -lcublasLt -lcuda"
     build
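With this hunk, a source build can inject its own CUDA flags by exporting OLLAMA_CUSTOM_CUDA_DEFS before the generate step runs. A minimal usage sketch on Linux, assuming the usual `go generate ./...` build flow from the repository root and treating the specific -D values below as illustrative only:

    # Illustrative values; any flags accepted by llama.cpp's cmake build could go here.
    export OLLAMA_CUSTOM_CUDA_DEFS="-DCMAKE_CUDA_ARCHITECTURES=87 -DLLAMA_CUDA_F16=on"
    go generate ./...   # runs llm/generate/gen_linux.sh, which now picks up the variable
    go build .

Note that when the variable is set, the script builds from the plain -DLLAMA_CUDA=on baseline and drops the default -DLLAMA_CUDA_FORCE_MMQ=on, so custom builds control that flag themselves.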
@@ -217,6 +225,12 @@ if [ -d "${ROCM_PATH}" ]; then
     fi
     init_vars
     CMAKE_DEFS="${COMMON_CMAKE_DEFS} ${CMAKE_DEFS} -DLLAMA_HIPBLAS=on -DCMAKE_C_COMPILER=$ROCM_PATH/llvm/bin/clang -DCMAKE_CXX_COMPILER=$ROCM_PATH/llvm/bin/clang++ -DAMDGPU_TARGETS=$(amdGPUs) -DGPU_TARGETS=$(amdGPUs)"
+    # Users building from source can tune the exact flags we pass to cmake for configuring llama.cpp
+    if [ -n "${OLLAMA_CUSTOM_ROCM_DEFS}" ]; then
+        echo "OLLAMA_CUSTOM_ROCM_DEFS=\"${OLLAMA_CUSTOM_ROCM_DEFS}\""
+        CMAKE_DEFS="${CMAKE_DEFS} ${OLLAMA_CUSTOM_ROCM_DEFS}"
+        echo "Building custom ROCM GPU"
+    fi
     BUILD_DIR="../build/linux/${ARCH}/rocm${ROCM_VARIANT}"
     EXTRA_LIBS="-L${ROCM_PATH}/lib -L/opt/amdgpu/lib/x86_64-linux-gnu/ -Wl,-rpath,\$ORIGIN/../../rocm/ -lhipblas -lrocblas -lamdhip64 -lrocsolver -lamd_comgr -lhsa-runtime64 -lrocsparse -ldrm -ldrm_amdgpu"
     build
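Unlike the CUDA path, the ROCm path appends the custom flags after the defaults rather than replacing them, so a later -D definition on the cmake command line overrides an earlier one. A minimal sketch, again assuming the `go generate ./...` flow and an illustrative gfx target:

    # Illustrative: narrow the offload targets instead of building every GPU that amdGPUs reports.
    export OLLAMA_CUSTOM_ROCM_DEFS="-DAMDGPU_TARGETS=gfx1030 -DGPU_TARGETS=gfx1030"
    go generate ./...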
llm/generate/gen_windows.ps1 (view file @ cc5a71e0)
@@ -243,6 +243,11 @@ if ($null -ne $script:CUDA_LIB_DIR) {
     init_vars
     $script:buildDir="../build/windows/${script:ARCH}/cuda$script:CUDA_VARIANT"
     $script:cmakeDefs += @("-A", "x64", "-DLLAMA_CUDA=ON", "-DLLAMA_AVX=on", "-DLLAMA_AVX2=off", "-DCUDAToolkit_INCLUDE_DIR=$script:CUDA_INCLUDE_DIR", "-DCMAKE_CUDA_ARCHITECTURES=${script:CMAKE_CUDA_ARCHITECTURES}")
+    if ($null -ne $env:OLLAMA_CUSTOM_CUDA_DEFS) {
+        write-host "OLLAMA_CUSTOM_CUDA_DEFS=`"${env:OLLAMA_CUSTOM_CUDA_DEFS}`""
+        $script:cmakeDefs += @("${env:OLLAMA_CUSTOM_CUDA_DEFS}")
+        write-host "building custom CUDA GPU"
+    }
     build
     sign
     compress
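The Windows script reads the same variable from the process environment, so it can be set in the PowerShell session that drives the build. A minimal sketch, assuming the build is launched with `go generate ./...` and treating the architecture value as illustrative:

    # Illustrative value; it is appended to the cmake definitions used for the CUDA build.
    $env:OLLAMA_CUSTOM_CUDA_DEFS = "-DCMAKE_CUDA_ARCHITECTURES=86"
    go generate ./...   # invokes llm/generate/gen_windows.ps1
    go build .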
@@ -274,7 +279,11 @@ if ($null -ne $env:HIP_PATH) {
     # We have to clobber the LIB var from the developer shell for clang to work properly
     $env:LIB=""
+    if ($null -ne $env:OLLAMA_CUSTOM_ROCM_DEFS) {
+        write-host "OLLAMA_CUSTOM_ROCM_DEFS=`"${env:OLLAMA_CUSTOM_ROCM_DEFS}`""
+        $script:cmakeDefs += @("${env:OLLAMA_CUSTOM_ROCM_DEFS}")
+        write-host "building custom ROCM GPU"
+    }
     write-host "Building ROCm"
     build
     # Ninja doesn't prefix with config name
     ...
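As with the CUDA path, the ROCm flags on Windows could be exercised from the same PowerShell session; the target below is only an example:

    # Illustrative: extra definitions are appended to $script:cmakeDefs before build is called.
    $env:OLLAMA_CUSTOM_ROCM_DEFS = "-DAMDGPU_TARGETS=gfx1100"
    go generate ./...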