Commit 9812d57c authored by SkqLiao's avatar SkqLiao
Browse files

fix typo, logging to file

parent 0f1684c2
......@@ -60,6 +60,11 @@ jobs:
# Make the CUDA 12.4 runtime libraries visible to the dynamic linker
# before launching inference (prepended so 12.4 wins over any system CUDA).
export LD_LIBRARY_PATH=/usr/local/cuda-12.4/lib64:$LD_LIBRARY_PATH
# CUDA_HOME is read by build/runtime tooling to locate the CUDA toolkit root.
export CUDA_HOME=/usr/local/cuda-12.4
# NOTE: ${{ github.workspace }} is a GitHub Actions expression (expanded by the
# runner, not the shell): the checked-out repository root.
cd ${{ github.workspace }}
python ktransformers/local_chat.py --model_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/config --gguf_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/ --max_new_tokens 100 --cache_len 1536 --cpu_infer 64 --prompt_file /home/qujing3/promptsbook.txt
python ktransformers/local_chat.py --model_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/config --gguf_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/ --max_new_tokens 100 --cache_len 1536 --cpu_infer 64 --prompt_file /home/qujing3/prompts/chinese.txt
echo "Running Local Chat 1"
# Smoke test 1 (English prompt): capture stdout to a log file so the
# transcript can be extracted below.
python ktransformers/local_chat.py --model_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/config --gguf_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/ --max_new_tokens 256 --cache_len 1536 --cpu_infer 64 --prompt_file /home/qujing3/prompts/book.txt > log1.txt
# Print everything from the first "Prompt:" line to end of file.
# BUG FIX: the original address '/Prompt:,$p' is an unterminated regex
# (missing the closing '/'), so sed aborts with "unterminated address regex"
# and no transcript is shown.
sed -n '/Prompt:/,$p' log1.txt
echo "Running Local Chat 2"
# Smoke test 2 (Chinese prompt), same capture-and-extract pattern.
python ktransformers/local_chat.py --model_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/config --gguf_path /home/qujing3/models/DeepSeek-R1-Q4_K_M/ --max_new_tokens 256 --cache_len 1536 --cpu_infer 64 --prompt_file /home/qujing3/prompts/chinese.txt > log2.txt
sed -n '/Prompt:/,$p' log2.txt
- run: echo "This job's status is ${{ job.status }}."
Markdown is supported
0% — Attach a file by drag &amp; drop or click to upload.
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment