cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
project("llama.cpp" C CXX)
include(CheckIncludeFileCXX)

#set(CMAKE_WARN_DEPRECATED YES)
set(CMAKE_WARN_UNUSED_CLI YES)

set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Default to a Release build when the user has not chosen a build type.
# Skipped for Xcode/MSVC, which are multi-config generators where
# CMAKE_BUILD_TYPE is not meaningful.
if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
endif()

# Add path to modules
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

# LLAMA_STANDALONE is ON when this is the top-level project (not consumed
# via add_subdirectory by a parent build).
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(LLAMA_STANDALONE ON)

    include(git-vars)

    # configure project version
    # TODO
else()
    set(LLAMA_STANDALONE OFF)
endif()

# Pick the default for BUILD_SHARED_LIBS: static for Emscripten and MinGW
# targets, shared everywhere else.
if (EMSCRIPTEN)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)

    option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON)
elseif (MINGW)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)
else()
    set(BUILD_SHARED_LIBS_DEFAULT ON)
endif()

option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})

if (WIN32)
    add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
endif()

#
# option list
#

# debug
option(LLAMA_ALL_WARNINGS           "llama: enable all compiler warnings"                   ON)
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)

# build
option(LLAMA_FATAL_WARNINGS "llama: enable -Werror flag" OFF)

# sanitizers
option(LLAMA_SANITIZE_THREAD    "llama: enable thread sanitizer"    OFF)
option(LLAMA_SANITIZE_ADDRESS   "llama: enable address sanitizer"   OFF)
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)

# extra artifacts (only built by default in standalone builds)
option(LLAMA_BUILD_TESTS    "llama: build tests"          ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples"       ${LLAMA_STANDALONE})
option(LLAMA_BUILD_SERVER   "llama: build server example" ${LLAMA_STANDALONE})

# 3rd party libs
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)

# Required for relocatable CMake package
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)

# override ggml options
set(GGML_SANITIZE_THREAD    ${LLAMA_SANITIZE_THREAD})
set(GGML_SANITIZE_ADDRESS   ${LLAMA_SANITIZE_ADDRESS})
set(GGML_SANITIZE_UNDEFINED ${LLAMA_SANITIZE_UNDEFINED})
set(GGML_ALL_WARNINGS       ${LLAMA_ALL_WARNINGS})
set(GGML_FATAL_WARNINGS     ${LLAMA_FATAL_WARNINGS})

# change the default for these ggml options
if (NOT DEFINED GGML_LLAMAFILE)
    set(GGML_LLAMAFILE ON)
endif()

if (NOT DEFINED GGML_CUDA_USE_GRAPHS)
    set(GGML_CUDA_USE_GRAPHS ON)
endif()

# transition helpers
#
# llama_option_depr(TYPE OLD NEW): if the deprecated option OLD is set,
# emit a message of severity TYPE (WARNING or FATAL_ERROR) and enable the
# replacement option NEW in the caller's scope.
function (llama_option_depr TYPE OLD NEW)
    if (${OLD})
        message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n")
        set(${NEW} ON PARENT_SCOPE)
    endif()
endfunction()

llama_option_depr(FATAL_ERROR LLAMA_CUBLAS              GGML_CUDA)
llama_option_depr(WARNING     LLAMA_CUDA                GGML_CUDA)
llama_option_depr(WARNING     LLAMA_KOMPUTE             GGML_KOMPUTE)
llama_option_depr(WARNING     LLAMA_METAL               GGML_METAL)
llama_option_depr(WARNING     LLAMA_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY)
llama_option_depr(WARNING     LLAMA_NATIVE              GGML_NATIVE)
llama_option_depr(WARNING     LLAMA_RPC                 GGML_RPC)
llama_option_depr(WARNING     LLAMA_SYCL                GGML_SYCL)
llama_option_depr(WARNING     LLAMA_SYCL_F16            GGML_SYCL_F16)
llama_option_depr(WARNING     LLAMA_CANN                GGML_CANN)

#
# build the library
#

if (NOT TARGET ggml)
    add_subdirectory(ggml)
    # ... otherwise assume ggml is added by a parent CMakeLists.txt
endif()
add_subdirectory(src)

#
# install
#

include(GNUInstallDirs)
include(CMakePackageConfigHelpers)

# BUILD_NUMBER / BUILD_COMMIT are provided by cmake/build-info.cmake above.
set(LLAMA_BUILD_NUMBER        ${BUILD_NUMBER})
set(LLAMA_BUILD_COMMIT        ${BUILD_COMMIT})
set(LLAMA_INSTALL_VERSION 0.0.${BUILD_NUMBER})

set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header  files")
set(LLAMA_LIB_INSTALL_DIR     ${CMAKE_INSTALL_LIBDIR}     CACHE PATH "Location of library files")
set(LLAMA_BIN_INSTALL_DIR     ${CMAKE_INSTALL_BINDIR}     CACHE PATH "Location of binary  files")


# At the moment some compile definitions are placed within the ggml/src
# directory but not exported on the `ggml` target. This could be improved by
# determining _precisely_ which defines are necessary for the llama-config
# package.
#
get_target_property(GGML_DIRECTORY ggml SOURCE_DIR)
get_directory_property(GGML_DIR_DEFINES DIRECTORY ${GGML_DIRECTORY} COMPILE_DEFINITIONS)
get_target_property(GGML_TARGET_DEFINES ggml COMPILE_DEFINITIONS)
set(GGML_TRANSIENT_DEFINES ${GGML_TARGET_DEFINES} ${GGML_DIR_DEFINES})
get_target_property(GGML_LINK_LIBRARIES ggml LINK_LIBRARIES)

set_target_properties(llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h)
install(TARGETS llama LIBRARY PUBLIC_HEADER)

configure_package_config_file(
        ${CMAKE_CURRENT_SOURCE_DIR}/cmake/llama-config.cmake.in
        ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake
    INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama
    PATH_VARS LLAMA_INCLUDE_INSTALL_DIR
              LLAMA_LIB_INSTALL_DIR
              LLAMA_BIN_INSTALL_DIR )

write_basic_package_version_file(
        ${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake
    VERSION ${LLAMA_INSTALL_VERSION}
    COMPATIBILITY SameMajorVersion)

install(FILES ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake
              ${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake
        DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama)

# Install the HF -> GGUF conversion script as an executable tool.
install(
    FILES convert_hf_to_gguf.py
    PERMISSIONS
        OWNER_READ
        OWNER_WRITE
        OWNER_EXECUTE
        GROUP_READ
        GROUP_EXECUTE
        WORLD_READ
        WORLD_EXECUTE
    DESTINATION ${CMAKE_INSTALL_BINDIR})

configure_file(cmake/llama.pc.in
        "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
        @ONLY)

# Honor CMAKE_INSTALL_LIBDIR (e.g. "lib64" on some distros) instead of a
# hardcoded "lib", consistent with the other GNUInstallDirs-based
# destinations in this file.
install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
        DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)

#
# programs, examples and tests
#

add_subdirectory(common)

# NOT CMAKE_JS_VERSION: skip tests when building as a cmake-js addon.
if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
    include(CTest)
    add_subdirectory(tests)
endif()

if (LLAMA_BUILD_EXAMPLES)
    add_subdirectory(examples)
    add_subdirectory(pocs)
endif()