"llm/llama.cpp/vscode:/vscode.git/clone" did not exist on "217903ab5111124041b3bb9d7a04d9101f2f32b2"
Unverified Commit e15932bb authored by Nelson Liu's avatar Nelson Liu Committed by GitHub
Browse files

Only emit warning about internal tokenizer if it isn't being used (#939)

parent ce741ba3
@@ -25,7 +25,8 @@ def get_tokenizer(
                 "Cannot use the fast tokenizer in slow tokenizer mode.")
             kwargs["use_fast"] = False
-    if "llama" in tokenizer_name.lower() and kwargs.get("use_fast", True):
+    if ("llama" in tokenizer_name.lower() and kwargs.get("use_fast", True)
+            and tokenizer_name != _FAST_LLAMA_TOKENIZER):
         logger.info(
             "For some LLaMA-based models, initializing the fast tokenizer may "
             "take a long time. To eliminate the initialization time, consider "
    ...
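For context, the change only touches the condition guarding this log hint. Below is a minimal, self-contained sketch of that check, assuming a placeholder value for _FAST_LLAMA_TOKENIZER and paraphrasing the truncated tail of the log message (both are assumptions, not taken from this diff):

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Placeholder value: the real module points this at the fast LLaMA tokenizer
# that the hint recommends; the exact value is not shown in this diff.
_FAST_LLAMA_TOKENIZER = "example-org/fast-llama-tokenizer"


def maybe_hint_fast_tokenizer(tokenizer_name: str, **kwargs) -> None:
    # Same condition as the new code in the diff: skip the hint when the
    # caller already requested the recommended fast tokenizer itself.
    if ("llama" in tokenizer_name.lower() and kwargs.get("use_fast", True)
            and tokenizer_name != _FAST_LLAMA_TOKENIZER):
        logger.info(
            "For some LLaMA-based models, initializing the fast tokenizer may "
            "take a long time. To eliminate the initialization time, consider "
            "using '%s' instead.", _FAST_LLAMA_TOKENIZER)  # tail paraphrased


maybe_hint_fast_tokenizer("my-org/llama-7b")        # emits the hint
maybe_hint_fast_tokenizer(_FAST_LLAMA_TOKENIZER)    # silent after this change
maybe_hint_fast_tokenizer("gpt2")                   # silent: not a LLaMA name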