OpenDAS / ollama

Commit 27388377 (unverified)
Authored Jan 21, 2024 by Daniel Hiltgen; committed by GitHub on Jan 21, 2024

Merge pull request #2131 from dhiltgen/probe_cards_at_init

Probe GPUs before backend init

Parents: fa8c990e ec376453
Showing 1 changed file with 34 additions and 1 deletion

llm/ext_server/ext_server.cpp (+34, -1)
@@ -3,6 +3,27 @@
 // Necessary evil since the server types are not defined in a header
 #include "server.cpp"
 
+// Low level API access to verify GPU access
+#if defined(GGML_USE_CUBLAS)
+#if defined(GGML_USE_HIPBLAS)
+#include <hip/hip_runtime.h>
+#include <hipblas/hipblas.h>
+#include <hip/hip_fp16.h>
+#ifdef __HIP_PLATFORM_AMD__
+// for rocblas_initialize()
+#include "rocblas/rocblas.h"
+#endif // __HIP_PLATFORM_AMD__
+#define cudaGetDevice hipGetDevice
+#define cudaError_t hipError_t
+#define cudaSuccess hipSuccess
+#define cudaGetErrorString hipGetErrorString
+#else
+#include <cuda_runtime.h>
+#include <cublas_v2.h>
+#include <cuda_fp16.h>
+#endif // defined(GGML_USE_HIPBLAS)
+#endif // GGML_USE_CUBLAS
+
 // Expose the llama server as a callable extern "C" API
 llama_server_context *llama = NULL;
 std::atomic<bool> ext_server_running(false);

@@ -12,7 +33,7 @@ void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err) {
 #if SERVER_VERBOSE != 1
   log_disable();
 #endif
-  LOG_TEE("system info: %s", llama_print_system_info());
+  LOG_TEE("system info: %s\n", llama_print_system_info());
   assert(err != NULL && sparams != NULL);
   err->id = 0;
   err->msg[0] = '\0';

@@ -60,6 +81,18 @@ void llama_server_init(ext_server_params *sparams, ext_server_resp_t *err) {
     params.mmproj = std::string(sparams->mmproj);
   }
 
+#if defined(GGML_USE_CUBLAS)
+  // Before attempting to init the backend which will assert on error, verify the CUDA/ROCM GPU is accessible
+  LOG_TEE("Performing pre-initialization of GPU\n");
+  int id;
+  cudaError_t cudaErr = cudaGetDevice(&id);
+  if (cudaErr != cudaSuccess) {
+    err->id = -1;
+    snprintf(err->msg, err->msg_len, "Unable to init GPU: %s", cudaGetErrorString(cudaErr));
+    return;
+  }
+#endif
+
   llama_backend_init(params.numa);
 
   // load the model
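The change guards llama_backend_init() with a cheap CUDA/ROCm runtime call so that an inaccessible GPU surfaces as an error returned to the caller rather than an assert deep inside backend initialization. Below is a minimal standalone sketch of that probe-before-init pattern, assuming a CUDA toolchain; probe_gpu() and the main() driver are hypothetical illustrations, not part of the ollama sources (on ROCm builds the commit simply aliases the cuda* names to their hip* equivalents).

// Minimal sketch: probe the GPU with a cheap runtime call before any heavy
// backend initialization that would abort the process on failure.
// Assumes a CUDA toolchain; probe_gpu() is a hypothetical helper.
#include <cstdio>
#include <cuda_runtime.h>

static bool probe_gpu() {
  int id = 0;
  // cudaGetDevice fails fast if no usable device or driver is present.
  cudaError_t cudaErr = cudaGetDevice(&id);
  if (cudaErr != cudaSuccess) {
    std::fprintf(stderr, "Unable to init GPU: %s\n", cudaGetErrorString(cudaErr));
    return false;
  }
  return true;
}

int main() {
  if (!probe_gpu()) {
    // Report the failure to the caller instead of asserting during init.
    return 1;
  }
  // Safe to proceed with the heavier backend initialization here.
  std::printf("GPU accessible; proceeding with backend init\n");
  return 0;
}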