START TIME: Fri Mar 15 11:07:21 CST 2024
[2024-03-15 11:07:58,964] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
[2024-03-15 11:08:32,142] [INFO] [runner.py:463:main] Using IP address of 10.3.6.47 for node c06r3n06
[2024-03-15 11:08:32,166] [INFO] [multinode_runner.py:72:get_cmd] Running on the following workers: c06r3n06,c06r3n07,c06r3n08,c06r3n09
[2024-03-15 11:08:32,167] [INFO] [runner.py:570:main] cmd = pdsh -S -f 1024 -w c06r3n06,c06r3n07,c06r3n08,c06r3n09 export UCX_MAX_EAGER_LANES=4; export UCX_MAX_RNDV_LANES=4; export UCX_ZCOPY_THRESH=auto; export UCX_WARN_UNUSED_ENV_VARS=n; export UCX_RNDV_THRESH=auto; export NCCL_IB_TIMEOUT=22; export UCX_IB_PCI_BW=mlx5_0:50Gbs,mlx5_1:50Gbs,mlx5_2:50Gbs,mlx5_3:50Gbs; export UCX_NET_DEVICES=mlx5_0:1,mlx5_1:1,mlx5_2:1,mlx5_3:1; export PYTHONPATH=/work/home/liangjing/LLM/LLaMA-Factory-main; cd /work/home/liangjing/LLM/LLaMA-Factory-main; /work/home/liangjing/anaconda3/envs/torch2.1/bin/python -u -m deepspeed.launcher.launch --world_info=eyJjMDZyM24wNiI6IFswLCAxLCAyLCAzXSwgImMwNnIzbjA3IjogWzAsIDEsIDIsIDNdLCAiYzA2cjNuMDgiOiBbMCwgMSwgMiwgM10sICJjMDZyM24wOSI6IFswLCAxLCAyLCAzXX0= --node_rank=%n --master_addr=10.3.6.47 --master_port=29500 src/train_bash.py --stage 'sft' --do_train --template 'llama2' --dataset 'alpaca_gpt4_en,alpaca_gpt4_zh' --finetuning_type 'full' --model_name_or_path '/work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b' --output_dir '/work/share/huchen1/liangjj/llama_factory' --per_device_train_batch_size '1' --per_device_eval_batch_size '1' --gradient_accumulation_steps '1' --preprocessing_num_workers '2' --lr_scheduler_type 'cosine' --logging_steps '10' --save_steps '100' --eval_steps '100' --learning_rate '5e-5' --max_grad_norm '0.5' --num_train_epochs '4.0' --val_size '0.01' --evaluation_strategy 'steps' --load_best_model_at_end --weight_decay '0.' --warmup_ratio '0.03' --plot_loss --fp16 --save_on_each_node --deepspeed 'deepspeed.json'
c06r3n06: [2024-03-15 11:09:00,339] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n08: [2024-03-15 11:09:02,603] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n07: [2024-03-15 11:09:02,637] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n09: [2024-03-15 11:09:02,657] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n06: [2024-03-15 11:09:21,955] [INFO] [launch.py:138:main] 0 NCCL_IB_TIMEOUT=22
c06r3n06: [2024-03-15 11:09:21,955] [INFO] [launch.py:145:main] WORLD INFO DICT: {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [0, 1, 2, 3], 'c06r3n08': [0, 1, 2, 3], 'c06r3n09': [0, 1, 2, 3]}
c06r3n06: [2024-03-15 11:09:21,955] [INFO] [launch.py:151:main] nnodes=4, num_local_procs=4, node_rank=0
c06r3n06: [2024-03-15 11:09:21,955] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [4, 5, 6, 7], 'c06r3n08': [8, 9, 10, 11], 'c06r3n09': [12, 13, 14, 15]})
c06r3n06: [2024-03-15 11:09:21,955] [INFO] [launch.py:163:main] dist_world_size=16
c06r3n06: [2024-03-15 11:09:21,955] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3
c06r3n08: [2024-03-15 11:09:22,245] [INFO] [launch.py:138:main] 2 NCCL_IB_TIMEOUT=22
c06r3n09: [2024-03-15 11:09:22,245] [INFO] [launch.py:138:main] 3 NCCL_IB_TIMEOUT=22
c06r3n09: [2024-03-15 11:09:22,246] [INFO] [launch.py:145:main] WORLD INFO DICT: {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [0, 1, 2, 3], 'c06r3n08': [0, 1, 2, 3], 'c06r3n09': [0, 1, 2, 3]}
c06r3n09: [2024-03-15 11:09:22,246] [INFO] [launch.py:151:main] nnodes=4, num_local_procs=4, node_rank=3
c06r3n09: [2024-03-15 11:09:22,246] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [4, 5, 6, 7], 'c06r3n08': [8, 9, 10, 11], 'c06r3n09': [12, 13, 14, 15]})
c06r3n09: [2024-03-15 11:09:22,246] [INFO] [launch.py:163:main] dist_world_size=16
c06r3n09: [2024-03-15 11:09:22,246] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3
c06r3n07: [2024-03-15 11:09:22,247] [INFO] [launch.py:138:main] 1 NCCL_IB_TIMEOUT=22
c06r3n07: [2024-03-15 11:09:22,265] [INFO] [launch.py:145:main] WORLD INFO DICT: {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [0, 1, 2, 3], 'c06r3n08': [0, 1, 2, 3], 'c06r3n09': [0, 1, 2, 3]}
c06r3n08: [2024-03-15 11:09:22,264] [INFO] [launch.py:145:main] WORLD INFO DICT: {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [0, 1, 2, 3], 'c06r3n08': [0, 1, 2, 3], 'c06r3n09': [0, 1, 2, 3]}
c06r3n07: [2024-03-15 11:09:22,265] [INFO] [launch.py:151:main] nnodes=4, num_local_procs=4, node_rank=1
c06r3n08: [2024-03-15 11:09:22,265] [INFO] [launch.py:151:main] nnodes=4, num_local_procs=4, node_rank=2
c06r3n07: [2024-03-15 11:09:22,266] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [4, 5, 6, 7], 'c06r3n08': [8, 9, 10, 11], 'c06r3n09': [12, 13, 14, 15]})
c06r3n08: [2024-03-15 11:09:22,265] [INFO] [launch.py:162:main] global_rank_mapping=defaultdict(<class 'list'>, {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [4, 5, 6, 7], 'c06r3n08': [8, 9, 10, 11], 'c06r3n09': [12, 13, 14, 15]})
c06r3n07: [2024-03-15 11:09:22,266] [INFO] [launch.py:163:main] dist_world_size=16
c06r3n08: [2024-03-15 11:09:22,265] [INFO] [launch.py:163:main] dist_world_size=16
c06r3n07: [2024-03-15 11:09:22,266] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3
c06r3n08: [2024-03-15 11:09:22,265] [INFO] [launch.py:165:main] Setting CUDA_VISIBLE_DEVICES=0,1,2,3
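For reference, the --world_info value passed to deepspeed.launcher.launch in the command above is just base64-encoded JSON describing which local GPU slots each host contributes. Decoding it reproduces the WORLD INFO DICT that every node echoes above, and walking the hosts in order reproduces the printed global_rank_mapping. A minimal Python sketch (the global_rank_mapping helper here is illustrative only, not DeepSpeed's code):

    import base64
    import json

    # --world_info payload copied verbatim from the launcher command above.
    WORLD_INFO_B64 = "eyJjMDZyM24wNiI6IFswLCAxLCAyLCAzXSwgImMwNnIzbjA3IjogWzAsIDEsIDIsIDNdLCAiYzA2cjNuMDgiOiBbMCwgMSwgMiwgM10sICJjMDZyM24wOSI6IFswLCAxLCAyLCAzXX0="

    world_info = json.loads(base64.b64decode(WORLD_INFO_B64))
    print(world_info)
    # {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [0, 1, 2, 3],
    #  'c06r3n08': [0, 1, 2, 3], 'c06r3n09': [0, 1, 2, 3]}

    # Illustrative helper: hosts are enumerated in order, so each node's four
    # local processes land on consecutive global ranks 0..15.
    def global_rank_mapping(info):
        mapping, next_rank = {}, 0
        for host, local_slots in info.items():
            mapping[host] = list(range(next_rank, next_rank + len(local_slots)))
            next_rank += len(local_slots)
        return mapping

    print(global_rank_mapping(world_info))
    # {'c06r3n06': [0, 1, 2, 3], 'c06r3n07': [4, 5, 6, 7],
    #  'c06r3n08': [8, 9, 10, 11], 'c06r3n09': [12, 13, 14, 15]}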
c06r3n07: [2024-03-15 11:09:53,381] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n07: [2024-03-15 11:09:53,382] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n07: [2024-03-15 11:09:53,382] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n07: [2024-03-15 11:09:53,382] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n06: [2024-03-15 11:09:53,385] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n06: [2024-03-15 11:09:53,385] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n06: [2024-03-15 11:09:53,385] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n06: [2024-03-15 11:09:53,385] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n09: [2024-03-15 11:09:53,478] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n09: [2024-03-15 11:09:53,478] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n09: [2024-03-15 11:09:53,478] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n09: [2024-03-15 11:09:53,478] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n08: [2024-03-15 11:09:53,505] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n08: [2024-03-15 11:09:53,505] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n08: [2024-03-15 11:09:53,505] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n08: [2024-03-15 11:09:53,505] [INFO] [real_accelerator.py:158:get_accelerator] Setting ds_accelerator to cuda (auto detect)
c06r3n09: /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/gradio_client/documentation.py:103: UserWarning: Could not get documentation group for : No known documentation group for module 'gradio.mix'
c06r3n09: warnings.warn(f"Could not get documentation group for {cls}: {exc}")
c06r3n06: /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/gradio_client/documentation.py:103: UserWarning: Could not get documentation group for : No known documentation group for module 'gradio.mix'
c06r3n06: warnings.warn(f"Could not get documentation group for {cls}: {exc}")
c06r3n07: /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/gradio_client/documentation.py:103: UserWarning: Could not get documentation group for : No known documentation group for module 'gradio.mix'
c06r3n07: warnings.warn(f"Could not get documentation group for {cls}: {exc}")
c06r3n08: /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/gradio_client/documentation.py:103: UserWarning: Could not get documentation group for : No known documentation group for module 'gradio.mix'
c06r3n08: warnings.warn(f"Could not get documentation group for {cls}: {exc}")
c06r3n08: [2024-03-15 11:10:30,298] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n08: [2024-03-15 11:10:30,299] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n09: [2024-03-15 11:10:30,299] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n08: [2024-03-15 11:10:30,300] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n08: [2024-03-15 11:10:30,300] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n09: [2024-03-15 11:10:30,300] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n09: [2024-03-15 11:10:30,300] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n09: [2024-03-15 11:10:30,300] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n07: [2024-03-15 11:10:30,301] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n06: [2024-03-15 11:10:30,301] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n07: [2024-03-15 11:10:30,302] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n06: [2024-03-15 11:10:30,301] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n06: [2024-03-15 11:10:30,302] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n07: [2024-03-15 11:10:30,302] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n06: [2024-03-15 11:10:30,302] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n06: [2024-03-15 11:10:30,302] [INFO] [comm.py:668:init_distributed] Initializing TorchBackend in DeepSpeed with backend nccl
c06r3n07: [2024-03-15 11:10:30,303] [INFO] [comm.py:637:init_distributed] cdb=None
c06r3n09: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n09: I0315 11:10:30.326577 12645 ProcessGroupNCCL.cpp:686] [Rank 12] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=248702704
c06r3n09: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n09: I0315 11:10:30.326625 12648 ProcessGroupNCCL.cpp:686] [Rank 15] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=271021504
c06r3n09: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n09: I0315 11:10:30.326658 12647 ProcessGroupNCCL.cpp:686] [Rank 14] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=260988848
c06r3n09: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n09: I0315 11:10:30.328519 12646 ProcessGroupNCCL.cpp:686] [Rank 13] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=258321824
c06r3n08: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n08: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n08: I0315 11:10:30.338122 10992 ProcessGroupNCCL.cpp:686] [Rank 10] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=260118240
c06r3n08: I0315 11:10:30.338132 10991 ProcessGroupNCCL.cpp:686] [Rank 9] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=273416880
c06r3n07: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n07: I0315 11:10:30.340847 30394 ProcessGroupNCCL.cpp:686] [Rank 6] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=252050768
c06r3n07: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n07: I0315 11:10:30.340865 30393 ProcessGroupNCCL.cpp:686] [Rank 5] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=270378416
c06r3n07: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n07: I0315 11:10:30.340873 30395 ProcessGroupNCCL.cpp:686] [Rank 7] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=251748448
c06r3n07: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n07: I0315 11:10:30.340847 30392 ProcessGroupNCCL.cpp:686] [Rank 4] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=258206800
c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1
c06r3n09: distributed training: True, compute dtype: torch.float16
c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 0, device: cuda:0, n_gpu: 1
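The TIMEOUT(ms): 1800000 reported by every ProcessGroupNCCL above is the 1800-second ddp_timeout from the training arguments that follow. Purely as an illustration, and not LLaMA-Factory's or DeepSpeed's actual code, a standalone NCCL process-group initialization with the same shape is sketched below; it assumes the launcher has exported RANK, WORLD_SIZE, MASTER_ADDR and MASTER_PORT, as deepspeed.launcher.launch normally does:

    import os
    from datetime import timedelta

    import torch.distributed as dist

    # Hypothetical standalone init mirroring what the log shows: backend nccl,
    # world size 16 (4 nodes x 4 GPUs each), 30-minute collective timeout.
    def init_nccl_from_env():
        dist.init_process_group(
            backend="nccl",
            rank=int(os.environ["RANK"]),               # assumed set by the launcher
            world_size=int(os.environ["WORLD_SIZE"]),   # 16 in this run
            timeout=timedelta(milliseconds=1_800_000),  # matches TIMEOUT(ms): 1800000
        )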
c06r3n09: distributed training: True, compute dtype: torch.float16 c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1 c06r3n09: distributed training: True, compute dtype: torch.float16 c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1 c06r3n09: distributed training: True, compute dtype: torch.float16 c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n09: _n_gpu=1, c06r3n09: adafactor=False, c06r3n09: adam_beta1=0.9, c06r3n09: adam_beta2=0.999, c06r3n09: adam_epsilon=1e-08, c06r3n09: auto_find_batch_size=False, c06r3n09: bf16=False, c06r3n09: bf16_full_eval=False, c06r3n09: data_seed=None, c06r3n09: dataloader_drop_last=False, c06r3n09: dataloader_num_workers=0, c06r3n09: dataloader_persistent_workers=False, c06r3n09: dataloader_pin_memory=True, c06r3n09: ddp_backend=None, c06r3n09: ddp_broadcast_buffers=None, c06r3n09: ddp_bucket_cap_mb=None, c06r3n09: ddp_find_unused_parameters=None, c06r3n09: ddp_timeout=1800, c06r3n09: debug=[], c06r3n09: deepspeed=deepspeed.json, c06r3n09: disable_tqdm=False, c06r3n09: dispatch_batches=None, c06r3n09: do_eval=True, c06r3n09: do_predict=False, c06r3n09: do_train=True, c06r3n09: eval_accumulation_steps=None, c06r3n09: eval_delay=0, c06r3n09: eval_steps=100, c06r3n09: evaluation_strategy=steps, c06r3n09: fp16=True, c06r3n09: fp16_backend=auto, c06r3n09: fp16_full_eval=False, c06r3n09: fp16_opt_level=O1, c06r3n09: fsdp=[], c06r3n09: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n09: fsdp_min_num_params=0, c06r3n09: fsdp_transformer_layer_cls_to_wrap=None, c06r3n09: full_determinism=False, c06r3n09: generation_config=None, c06r3n09: generation_max_length=None, c06r3n09: generation_num_beams=None, c06r3n09: gradient_accumulation_steps=1, c06r3n09: gradient_checkpointing=False, c06r3n09: gradient_checkpointing_kwargs=None, c06r3n09: greater_is_better=False, c06r3n09: group_by_length=False, c06r3n09: half_precision_backend=auto, c06r3n09: hub_always_push=False, c06r3n09: hub_model_id=None, c06r3n09: hub_private_repo=False, c06r3n09: hub_strategy=every_save, c06r3n09: hub_token=, c06r3n09: ignore_data_skip=False, c06r3n09: include_inputs_for_metrics=False, c06r3n09: include_num_input_tokens_seen=False, c06r3n09: include_tokens_per_second=False, c06r3n09: jit_mode_eval=False, c06r3n09: label_names=None, c06r3n09: label_smoothing_factor=0.0, c06r3n09: learning_rate=5e-05, c06r3n09: length_column_name=length, c06r3n09: load_best_model_at_end=True, c06r3n09: local_rank=3, c06r3n09: log_level=passive, c06r3n09: log_level_replica=warning, c06r3n09: log_on_each_node=True, c06r3n09: logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n09, c06r3n09: logging_first_step=False, c06r3n09: logging_nan_inf_filter=True, c06r3n09: logging_steps=10, c06r3n09: logging_strategy=steps, c06r3n09: lr_scheduler_kwargs={}, c06r3n09: lr_scheduler_type=cosine, c06r3n09: max_grad_norm=0.5, c06r3n09: max_steps=-1, c06r3n09: metric_for_best_model=loss, c06r3n09: mp_parameters=, c06r3n09: neftune_noise_alpha=None, c06r3n09: no_cuda=False, c06r3n09: num_train_epochs=4.0, c06r3n09: optim=adamw_torch, c06r3n09: optim_args=None, c06r3n09: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n09: overwrite_output_dir=False, c06r3n09: past_index=-1, c06r3n09: per_device_eval_batch_size=1, c06r3n09: per_device_train_batch_size=1, c06r3n09: 
predict_with_generate=False, c06r3n09: prediction_loss_only=False, c06r3n09: push_to_hub=False, c06r3n09: push_to_hub_model_id=None, c06r3n09: push_to_hub_organization=None, c06r3n09: push_to_hub_token=, c06r3n09: ray_scope=last, c06r3n09: remove_unused_columns=True, c06r3n09: report_to=['tensorboard'], c06r3n09: resume_from_checkpoint=None, c06r3n09: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n09: save_on_each_node=True, c06r3n09: save_only_model=False, c06r3n09: save_safetensors=True, c06r3n09: save_steps=100, c06r3n09: save_strategy=steps, c06r3n09: save_total_limit=None, c06r3n09: seed=42, c06r3n09: skip_memory_metrics=True, c06r3n09: sortish_sampler=False, c06r3n09: split_batches=False, c06r3n09: tf32=None, c06r3n09: torch_compile=False, c06r3n09: torch_compile_backend=None, c06r3n09: torch_compile_mode=None, c06r3n09: torchdynamo=None, c06r3n09: tpu_metrics_debug=False, c06r3n09: tpu_num_cores=None, c06r3n09: use_cpu=False, c06r3n09: use_ipex=False, c06r3n09: use_legacy_prediction_loop=False, c06r3n09: use_mps_device=False, c06r3n09: warmup_ratio=0.03, c06r3n09: warmup_steps=0, c06r3n09: weight_decay=0.0, c06r3n09: ) c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n09: _n_gpu=1, c06r3n09: adafactor=False, c06r3n09: adam_beta1=0.9, c06r3n09: adam_beta2=0.999, c06r3n09: adam_epsilon=1e-08, c06r3n09: auto_find_batch_size=False, c06r3n09: bf16=False, c06r3n09: bf16_full_eval=False, c06r3n09: data_seed=None, c06r3n09: dataloader_drop_last=False, c06r3n09: dataloader_num_workers=0, c06r3n09: dataloader_persistent_workers=False, c06r3n09: dataloader_pin_memory=True, c06r3n09: ddp_backend=None, c06r3n09: ddp_broadcast_buffers=None, c06r3n09: ddp_bucket_cap_mb=None, c06r3n09: ddp_find_unused_parameters=None, c06r3n09: ddp_timeout=1800, c06r3n09: debug=[], c06r3n09: deepspeed=deepspeed.json, c06r3n09: disable_tqdm=False, c06r3n09: dispatch_batches=None, c06r3n09: do_eval=True, c06r3n09: do_predict=False, c06r3n09: do_train=True, c06r3n09: eval_accumulation_steps=None, c06r3n09: eval_delay=0, c06r3n09: eval_steps=100, c06r3n09: evaluation_strategy=steps, c06r3n09: fp16=True, c06r3n09: fp16_backend=auto, c06r3n09: fp16_full_eval=False, c06r3n09: fp16_opt_level=O1, c06r3n09: fsdp=[], c06r3n09: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n09: fsdp_min_num_params=0, c06r3n09: fsdp_transformer_layer_cls_to_wrap=None, c06r3n09: full_determinism=False, c06r3n09: generation_config=None, c06r3n09: generation_max_length=None, c06r3n09: generation_num_beams=None, c06r3n09: gradient_accumulation_steps=1, c06r3n09: gradient_checkpointing=False, c06r3n09: gradient_checkpointing_kwargs=None, c06r3n09: greater_is_better=False, c06r3n09: group_by_length=False, c06r3n09: half_precision_backend=auto, c06r3n09: hub_always_push=False, c06r3n09: hub_model_id=None, c06r3n09: hub_private_repo=False, c06r3n09: hub_strategy=every_save, c06r3n09: hub_token=, c06r3n09: ignore_data_skip=False, c06r3n09: include_inputs_for_metrics=False, c06r3n09: include_num_input_tokens_seen=False, c06r3n09: include_tokens_per_second=False, c06r3n09: jit_mode_eval=False, c06r3n09: label_names=None, c06r3n09: label_smoothing_factor=0.0, c06r3n09: learning_rate=5e-05, c06r3n09: length_column_name=length, c06r3n09: load_best_model_at_end=True, c06r3n09: local_rank=0, c06r3n09: log_level=passive, c06r3n09: log_level_replica=warning, c06r3n09: log_on_each_node=True, c06r3n09: 
logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n09, c06r3n09: logging_first_step=False, c06r3n09: logging_nan_inf_filter=True, c06r3n09: logging_steps=10, c06r3n09: logging_strategy=steps, c06r3n09: lr_scheduler_kwargs={}, c06r3n09: lr_scheduler_type=cosine, c06r3n09: max_grad_norm=0.5, c06r3n09: max_steps=-1, c06r3n09: metric_for_best_model=loss, c06r3n09: mp_parameters=, c06r3n09: neftune_noise_alpha=None, c06r3n09: no_cuda=False, c06r3n09: num_train_epochs=4.0, c06r3n09: optim=adamw_torch, c06r3n09: optim_args=None, c06r3n09: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n09: overwrite_output_dir=False, c06r3n09: past_index=-1, c06r3n09: per_device_eval_batch_size=1, c06r3n09: per_device_train_batch_size=1, c06r3n09: predict_with_generate=False, c06r3n09: prediction_loss_only=False, c06r3n09: push_to_hub=False, c06r3n09: push_to_hub_model_id=None, c06r3n09: push_to_hub_organization=None, c06r3n09: push_to_hub_token=, c06r3n09: ray_scope=last, c06r3n09: remove_unused_columns=True, c06r3n09: report_to=['tensorboard'], c06r3n09: resume_from_checkpoint=None, c06r3n09: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n09: save_on_each_node=True, c06r3n09: save_only_model=False, c06r3n09: save_safetensors=True, c06r3n09: save_steps=100, c06r3n09: save_strategy=steps, c06r3n09: save_total_limit=None, c06r3n09: seed=42, c06r3n09: skip_memory_metrics=True, c06r3n09: sortish_sampler=False, c06r3n09: split_batches=False, c06r3n09: tf32=None, c06r3n09: torch_compile=False, c06r3n09: torch_compile_backend=None, c06r3n09: torch_compile_mode=None, c06r3n09: torchdynamo=None, c06r3n09: tpu_metrics_debug=False, c06r3n09: tpu_num_cores=None, c06r3n09: use_cpu=False, c06r3n09: use_ipex=False, c06r3n09: use_legacy_prediction_loop=False, c06r3n09: use_mps_device=False, c06r3n09: warmup_ratio=0.03, c06r3n09: warmup_steps=0, c06r3n09: weight_decay=0.0, c06r3n09: ) c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n09: _n_gpu=1, c06r3n09: adafactor=False, c06r3n09: adam_beta1=0.9, c06r3n09: adam_beta2=0.999, c06r3n09: adam_epsilon=1e-08, c06r3n09: auto_find_batch_size=False, c06r3n09: bf16=False, c06r3n09: bf16_full_eval=False, c06r3n09: data_seed=None, c06r3n09: dataloader_drop_last=False, c06r3n09: dataloader_num_workers=0, c06r3n09: dataloader_persistent_workers=False, c06r3n09: dataloader_pin_memory=True, c06r3n09: ddp_backend=None, c06r3n09: ddp_broadcast_buffers=None, c06r3n09: ddp_bucket_cap_mb=None, c06r3n09: ddp_find_unused_parameters=None, c06r3n09: ddp_timeout=1800, c06r3n09: debug=[], c06r3n09: deepspeed=deepspeed.json, c06r3n09: disable_tqdm=False, c06r3n09: dispatch_batches=None, c06r3n09: do_eval=True, c06r3n09: do_predict=False, c06r3n09: do_train=True, c06r3n09: eval_accumulation_steps=None, c06r3n09: eval_delay=0, c06r3n09: eval_steps=100, c06r3n09: evaluation_strategy=steps, c06r3n09: fp16=True, c06r3n09: fp16_backend=auto, c06r3n09: fp16_full_eval=False, c06r3n09: fp16_opt_level=O1, c06r3n09: fsdp=[], c06r3n09: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n09: fsdp_min_num_params=0, c06r3n09: fsdp_transformer_layer_cls_to_wrap=None, c06r3n09: full_determinism=False, c06r3n09: generation_config=None, c06r3n09: generation_max_length=None, c06r3n09: generation_num_beams=None, c06r3n09: gradient_accumulation_steps=1, c06r3n09: gradient_checkpointing=False, c06r3n09: gradient_checkpointing_kwargs=None, c06r3n09: 
greater_is_better=False, c06r3n09: group_by_length=False, c06r3n09: half_precision_backend=auto, c06r3n09: hub_always_push=False, c06r3n09: hub_model_id=None, c06r3n09: hub_private_repo=False, c06r3n09: hub_strategy=every_save, c06r3n09: hub_token=, c06r3n09: ignore_data_skip=False, c06r3n09: include_inputs_for_metrics=False, c06r3n09: include_num_input_tokens_seen=False, c06r3n09: include_tokens_per_second=False, c06r3n09: jit_mode_eval=False, c06r3n09: label_names=None, c06r3n09: label_smoothing_factor=0.0, c06r3n09: learning_rate=5e-05, c06r3n09: length_column_name=length, c06r3n09: load_best_model_at_end=True, c06r3n09: local_rank=2, c06r3n09: log_level=passive, c06r3n09: log_level_replica=warning, c06r3n09: log_on_each_node=True, c06r3n09: logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n09, c06r3n09: logging_first_step=False, c06r3n09: logging_nan_inf_filter=True, c06r3n09: logging_steps=10, c06r3n09: logging_strategy=steps, c06r3n09: lr_scheduler_kwargs={}, c06r3n09: lr_scheduler_type=cosine, c06r3n09: max_grad_norm=0.5, c06r3n09: max_steps=-1, c06r3n09: metric_for_best_model=loss, c06r3n09: mp_parameters=, c06r3n09: neftune_noise_alpha=None, c06r3n09: no_cuda=False, c06r3n09: num_train_epochs=4.0, c06r3n09: optim=adamw_torch, c06r3n09: optim_args=None, c06r3n09: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n09: overwrite_output_dir=False, c06r3n09: past_index=-1, c06r3n09: per_device_eval_batch_size=1, c06r3n09: per_device_train_batch_size=1, c06r3n09: predict_with_generate=False, c06r3n09: prediction_loss_only=False, c06r3n09: push_to_hub=False, c06r3n09: push_to_hub_model_id=None, c06r3n09: push_to_hub_organization=None, c06r3n09: push_to_hub_token=, c06r3n09: ray_scope=last, c06r3n09: remove_unused_columns=True, c06r3n09: report_to=['tensorboard'], c06r3n09: resume_from_checkpoint=None, c06r3n09: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n09: save_on_each_node=True, c06r3n09: save_only_model=False, c06r3n09: save_safetensors=True, c06r3n09: save_steps=100, c06r3n09: save_strategy=steps, c06r3n09: save_total_limit=None, c06r3n09: seed=42, c06r3n09: skip_memory_metrics=True, c06r3n09: sortish_sampler=False, c06r3n09: split_batches=False, c06r3n09: tf32=None, c06r3n09: torch_compile=False, c06r3n09: torch_compile_backend=None, c06r3n09: torch_compile_mode=None, c06r3n09: torchdynamo=None, c06r3n09: tpu_metrics_debug=False, c06r3n09: tpu_num_cores=None, c06r3n09: use_cpu=False, c06r3n09: use_ipex=False, c06r3n09: use_legacy_prediction_loop=False, c06r3n09: use_mps_device=False, c06r3n09: warmup_ratio=0.03, c06r3n09: warmup_steps=0, c06r3n09: weight_decay=0.0, c06r3n09: ) c06r3n09: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n09: _n_gpu=1, c06r3n09: adafactor=False, c06r3n09: adam_beta1=0.9, c06r3n09: adam_beta2=0.999, c06r3n09: adam_epsilon=1e-08, c06r3n09: auto_find_batch_size=False, c06r3n09: bf16=False, c06r3n09: bf16_full_eval=False, c06r3n09: data_seed=None, c06r3n09: dataloader_drop_last=False, c06r3n09: dataloader_num_workers=0, c06r3n09: dataloader_persistent_workers=False, c06r3n09: dataloader_pin_memory=True, c06r3n09: ddp_backend=None, c06r3n09: ddp_broadcast_buffers=None, c06r3n09: ddp_bucket_cap_mb=None, c06r3n09: ddp_find_unused_parameters=None, c06r3n09: ddp_timeout=1800, c06r3n09: debug=[], c06r3n09: deepspeed=deepspeed.json, c06r3n09: disable_tqdm=False, c06r3n09: dispatch_batches=None, c06r3n09: do_eval=True, c06r3n09: 
do_predict=False, c06r3n09: do_train=True, c06r3n09: eval_accumulation_steps=None, c06r3n09: eval_delay=0, c06r3n09: eval_steps=100, c06r3n09: evaluation_strategy=steps, c06r3n09: fp16=True, c06r3n09: fp16_backend=auto, c06r3n09: fp16_full_eval=False, c06r3n09: fp16_opt_level=O1, c06r3n09: fsdp=[], c06r3n09: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n09: fsdp_min_num_params=0, c06r3n09: fsdp_transformer_layer_cls_to_wrap=None, c06r3n09: full_determinism=False, c06r3n09: generation_config=None, c06r3n09: generation_max_length=None, c06r3n09: generation_num_beams=None, c06r3n09: gradient_accumulation_steps=1, c06r3n09: gradient_checkpointing=False, c06r3n09: gradient_checkpointing_kwargs=None, c06r3n09: greater_is_better=False, c06r3n09: group_by_length=False, c06r3n09: half_precision_backend=auto, c06r3n09: hub_always_push=False, c06r3n09: hub_model_id=None, c06r3n09: hub_private_repo=False, c06r3n09: hub_strategy=every_save, c06r3n09: hub_token=, c06r3n09: ignore_data_skip=False, c06r3n09: include_inputs_for_metrics=False, c06r3n09: include_num_input_tokens_seen=False, c06r3n09: include_tokens_per_second=False, c06r3n09: jit_mode_eval=False, c06r3n09: label_names=None, c06r3n09: label_smoothing_factor=0.0, c06r3n09: learning_rate=5e-05, c06r3n09: length_column_name=length, c06r3n09: load_best_model_at_end=True, c06r3n09: local_rank=1, c06r3n09: log_level=passive, c06r3n09: log_level_replica=warning, c06r3n09: log_on_each_node=True, c06r3n09: logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n09, c06r3n09: logging_first_step=False, c06r3n09: logging_nan_inf_filter=True, c06r3n09: logging_steps=10, c06r3n09: logging_strategy=steps, c06r3n09: lr_scheduler_kwargs={}, c06r3n09: lr_scheduler_type=cosine, c06r3n09: max_grad_norm=0.5, c06r3n09: max_steps=-1, c06r3n09: metric_for_best_model=loss, c06r3n09: mp_parameters=, c06r3n09: neftune_noise_alpha=None, c06r3n09: no_cuda=False, c06r3n09: num_train_epochs=4.0, c06r3n09: optim=adamw_torch, c06r3n09: optim_args=None, c06r3n09: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n09: overwrite_output_dir=False, c06r3n09: past_index=-1, c06r3n09: per_device_eval_batch_size=1, c06r3n09: per_device_train_batch_size=1, c06r3n09: predict_with_generate=False, c06r3n09: prediction_loss_only=False, c06r3n09: push_to_hub=False, c06r3n09: push_to_hub_model_id=None, c06r3n09: push_to_hub_organization=None, c06r3n09: push_to_hub_token=, c06r3n09: ray_scope=last, c06r3n09: remove_unused_columns=True, c06r3n09: report_to=['tensorboard'], c06r3n09: resume_from_checkpoint=None, c06r3n09: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n09: save_on_each_node=True, c06r3n09: save_only_model=False, c06r3n09: save_safetensors=True, c06r3n09: save_steps=100, c06r3n09: save_strategy=steps, c06r3n09: save_total_limit=None, c06r3n09: seed=42, c06r3n09: skip_memory_metrics=True, c06r3n09: sortish_sampler=False, c06r3n09: split_batches=False, c06r3n09: tf32=None, c06r3n09: torch_compile=False, c06r3n09: torch_compile_backend=None, c06r3n09: torch_compile_mode=None, c06r3n09: torchdynamo=None, c06r3n09: tpu_metrics_debug=False, c06r3n09: tpu_num_cores=None, c06r3n09: use_cpu=False, c06r3n09: use_ipex=False, c06r3n09: use_legacy_prediction_loop=False, c06r3n09: use_mps_device=False, c06r3n09: warmup_ratio=0.03, c06r3n09: warmup_steps=0, c06r3n09: weight_decay=0.0, c06r3n09: ) c06r3n09: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,466 >> loading file tokenizer.model c06r3n09: 
[INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,466 >> loading file added_tokens.json c06r3n09: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,466 >> loading file special_tokens_map.json c06r3n09: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,466 >> loading file tokenizer_config.json c06r3n09: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,466 >> loading file tokenizer.json c06r3n08: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1 c06r3n08: distributed training: True, compute dtype: torch.float16 c06r3n08: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1 c06r3n08: distributed training: True, compute dtype: torch.float16 c06r3n08: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n08: _n_gpu=1, c06r3n08: adafactor=False, c06r3n08: adam_beta1=0.9, c06r3n08: adam_beta2=0.999, c06r3n08: adam_epsilon=1e-08, c06r3n08: auto_find_batch_size=False, c06r3n08: bf16=False, c06r3n08: bf16_full_eval=False, c06r3n08: data_seed=None, c06r3n08: dataloader_drop_last=False, c06r3n08: dataloader_num_workers=0, c06r3n08: dataloader_persistent_workers=False, c06r3n08: dataloader_pin_memory=True, c06r3n08: ddp_backend=None, c06r3n08: ddp_broadcast_buffers=None, c06r3n08: ddp_bucket_cap_mb=None, c06r3n08: ddp_find_unused_parameters=None, c06r3n08: ddp_timeout=1800, c06r3n08: debug=[], c06r3n08: deepspeed=deepspeed.json, c06r3n08: disable_tqdm=False, c06r3n08: dispatch_batches=None, c06r3n08: do_eval=True, c06r3n08: do_predict=False, c06r3n08: do_train=True, c06r3n08: eval_accumulation_steps=None, c06r3n08: eval_delay=0, c06r3n08: eval_steps=100, c06r3n08: evaluation_strategy=steps, c06r3n08: fp16=True, c06r3n08: fp16_backend=auto, c06r3n08: fp16_full_eval=False, c06r3n08: fp16_opt_level=O1, c06r3n08: fsdp=[], c06r3n08: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n08: fsdp_min_num_params=0, c06r3n08: fsdp_transformer_layer_cls_to_wrap=None, c06r3n08: full_determinism=False, c06r3n08: generation_config=None, c06r3n08: generation_max_length=None, c06r3n08: generation_num_beams=None, c06r3n08: gradient_accumulation_steps=1, c06r3n08: gradient_checkpointing=False, c06r3n08: gradient_checkpointing_kwargs=None, c06r3n08: greater_is_better=False, c06r3n08: group_by_length=False, c06r3n08: half_precision_backend=auto, c06r3n08: hub_always_push=False, c06r3n08: hub_model_id=None, c06r3n08: hub_private_repo=False, c06r3n08: hub_strategy=every_save, c06r3n08: hub_token=, c06r3n08: ignore_data_skip=False, c06r3n08: include_inputs_for_metrics=False, c06r3n08: include_num_input_tokens_seen=False, c06r3n08: include_tokens_per_second=False, c06r3n08: jit_mode_eval=False, c06r3n08: label_names=None, c06r3n08: label_smoothing_factor=0.0, c06r3n08: learning_rate=5e-05, c06r3n08: length_column_name=length, c06r3n08: load_best_model_at_end=True, c06r3n08: local_rank=1, c06r3n08: log_level=passive, c06r3n08: log_level_replica=warning, c06r3n08: log_on_each_node=True, c06r3n08: logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n08, c06r3n08: logging_first_step=False, c06r3n08: logging_nan_inf_filter=True, c06r3n08: logging_steps=10, c06r3n08: logging_strategy=steps, c06r3n08: lr_scheduler_kwargs={}, c06r3n08: lr_scheduler_type=cosine, c06r3n08: max_grad_norm=0.5, c06r3n08: max_steps=-1, c06r3n08: metric_for_best_model=loss, c06r3n08: mp_parameters=, c06r3n08: 
neftune_noise_alpha=None, c06r3n08: no_cuda=False, c06r3n08: num_train_epochs=4.0, c06r3n08: optim=adamw_torch, c06r3n08: optim_args=None, c06r3n08: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n08: overwrite_output_dir=False, c06r3n08: past_index=-1, c06r3n08: per_device_eval_batch_size=1, c06r3n08: per_device_train_batch_size=1, c06r3n08: predict_with_generate=False, c06r3n08: prediction_loss_only=False, c06r3n08: push_to_hub=False, c06r3n08: push_to_hub_model_id=None, c06r3n08: push_to_hub_organization=None, c06r3n08: push_to_hub_token=, c06r3n08: ray_scope=last, c06r3n08: remove_unused_columns=True, c06r3n08: report_to=['tensorboard'], c06r3n08: resume_from_checkpoint=None, c06r3n08: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n08: save_on_each_node=True, c06r3n08: save_only_model=False, c06r3n08: save_safetensors=True, c06r3n08: save_steps=100, c06r3n08: save_strategy=steps, c06r3n08: save_total_limit=None, c06r3n08: seed=42, c06r3n08: skip_memory_metrics=True, c06r3n08: sortish_sampler=False, c06r3n08: split_batches=False, c06r3n08: tf32=None, c06r3n08: torch_compile=False, c06r3n08: torch_compile_backend=None, c06r3n08: torch_compile_mode=None, c06r3n08: torchdynamo=None, c06r3n08: tpu_metrics_debug=False, c06r3n08: tpu_num_cores=None, c06r3n08: use_cpu=False, c06r3n08: use_ipex=False, c06r3n08: use_legacy_prediction_loop=False, c06r3n08: use_mps_device=False, c06r3n08: warmup_ratio=0.03, c06r3n08: warmup_steps=0, c06r3n08: weight_decay=0.0, c06r3n08: ) c06r3n08: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n08: _n_gpu=1, c06r3n08: adafactor=False, c06r3n08: adam_beta1=0.9, c06r3n08: adam_beta2=0.999, c06r3n08: adam_epsilon=1e-08, c06r3n08: auto_find_batch_size=False, c06r3n08: bf16=False, c06r3n08: bf16_full_eval=False, c06r3n08: data_seed=None, c06r3n08: dataloader_drop_last=False, c06r3n08: dataloader_num_workers=0, c06r3n08: dataloader_persistent_workers=False, c06r3n08: dataloader_pin_memory=True, c06r3n08: ddp_backend=None, c06r3n08: ddp_broadcast_buffers=None, c06r3n08: ddp_bucket_cap_mb=None, c06r3n08: ddp_find_unused_parameters=None, c06r3n08: ddp_timeout=1800, c06r3n08: debug=[], c06r3n08: deepspeed=deepspeed.json, c06r3n08: disable_tqdm=False, c06r3n08: dispatch_batches=None, c06r3n08: do_eval=True, c06r3n08: do_predict=False, c06r3n08: do_train=True, c06r3n08: eval_accumulation_steps=None, c06r3n08: eval_delay=0, c06r3n08: eval_steps=100, c06r3n08: evaluation_strategy=steps, c06r3n08: fp16=True, c06r3n08: fp16_backend=auto, c06r3n08: fp16_full_eval=False, c06r3n08: fp16_opt_level=O1, c06r3n08: fsdp=[], c06r3n08: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n08: fsdp_min_num_params=0, c06r3n08: fsdp_transformer_layer_cls_to_wrap=None, c06r3n08: full_determinism=False, c06r3n08: generation_config=None, c06r3n08: generation_max_length=None, c06r3n08: generation_num_beams=None, c06r3n08: gradient_accumulation_steps=1, c06r3n08: gradient_checkpointing=False, c06r3n08: gradient_checkpointing_kwargs=None, c06r3n08: greater_is_better=False, c06r3n08: group_by_length=False, c06r3n08: half_precision_backend=auto, c06r3n08: hub_always_push=False, c06r3n08: hub_model_id=None, c06r3n08: hub_private_repo=False, c06r3n08: hub_strategy=every_save, c06r3n08: hub_token=, c06r3n08: ignore_data_skip=False, c06r3n08: include_inputs_for_metrics=False, c06r3n08: include_num_input_tokens_seen=False, c06r3n08: include_tokens_per_second=False, c06r3n08: 
jit_mode_eval=False, c06r3n08: label_names=None, c06r3n08: label_smoothing_factor=0.0, c06r3n08: learning_rate=5e-05, c06r3n08: length_column_name=length, c06r3n08: load_best_model_at_end=True, c06r3n08: local_rank=2, c06r3n08: log_level=passive, c06r3n08: log_level_replica=warning, c06r3n08: log_on_each_node=True, c06r3n08: logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n08, c06r3n08: logging_first_step=False, c06r3n08: logging_nan_inf_filter=True, c06r3n08: logging_steps=10, c06r3n08: logging_strategy=steps, c06r3n08: lr_scheduler_kwargs={}, c06r3n08: lr_scheduler_type=cosine, c06r3n08: max_grad_norm=0.5, c06r3n08: max_steps=-1, c06r3n08: metric_for_best_model=loss, c06r3n08: mp_parameters=, c06r3n08: neftune_noise_alpha=None, c06r3n08: no_cuda=False, c06r3n08: num_train_epochs=4.0, c06r3n08: optim=adamw_torch, c06r3n08: optim_args=None, c06r3n08: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n08: overwrite_output_dir=False, c06r3n07: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1 c06r3n07: distributed training: True, compute dtype: torch.float16 c06r3n08: past_index=-1, c06r3n08: per_device_eval_batch_size=1, c06r3n08: per_device_train_batch_size=1, c06r3n08: predict_with_generate=False, c06r3n08: prediction_loss_only=False, c06r3n08: push_to_hub=False, c06r3n08: push_to_hub_model_id=None, c06r3n08: push_to_hub_organization=None, c06r3n08: push_to_hub_token=, c06r3n08: ray_scope=last, c06r3n08: remove_unused_columns=True, c06r3n08: report_to=['tensorboard'], c06r3n08: resume_from_checkpoint=None, c06r3n08: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n08: save_on_each_node=True, c06r3n08: save_only_model=False, c06r3n08: save_safetensors=True, c06r3n08: save_steps=100, c06r3n08: save_strategy=steps, c06r3n08: save_total_limit=None, c06r3n08: seed=42, c06r3n08: skip_memory_metrics=True, c06r3n08: sortish_sampler=False, c06r3n08: split_batches=False, c06r3n08: tf32=None, c06r3n08: torch_compile=False, c06r3n08: torch_compile_backend=None, c06r3n08: torch_compile_mode=None, c06r3n08: torchdynamo=None, c06r3n08: tpu_metrics_debug=False, c06r3n08: tpu_num_cores=None, c06r3n08: use_cpu=False, c06r3n08: use_ipex=False, c06r3n08: use_legacy_prediction_loop=False, c06r3n08: use_mps_device=False, c06r3n08: warmup_ratio=0.03, c06r3n08: warmup_steps=0, c06r3n08: weight_decay=0.0, c06r3n08: ) c06r3n07: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 0, device: cuda:0, n_gpu: 1 c06r3n07: distributed training: True, compute dtype: torch.float16 c06r3n07: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( c06r3n07: _n_gpu=1, c06r3n07: adafactor=False, c06r3n07: adam_beta1=0.9, c06r3n07: adam_beta2=0.999, c06r3n07: adam_epsilon=1e-08, c06r3n07: auto_find_batch_size=False, c06r3n07: bf16=False, c06r3n07: bf16_full_eval=False, c06r3n07: data_seed=None, c06r3n07: dataloader_drop_last=False, c06r3n07: dataloader_num_workers=0, c06r3n07: dataloader_persistent_workers=False, c06r3n07: dataloader_pin_memory=True, c06r3n07: ddp_backend=None, c06r3n07: ddp_broadcast_buffers=None, c06r3n07: ddp_bucket_cap_mb=None, c06r3n07: ddp_find_unused_parameters=None, c06r3n07: ddp_timeout=1800, c06r3n07: debug=[], c06r3n07: deepspeed=deepspeed.json, c06r3n07: disable_tqdm=False, c06r3n07: dispatch_batches=None, c06r3n07: do_eval=True, c06r3n07: do_predict=False, c06r3n07: do_train=True, c06r3n07: eval_accumulation_steps=None, c06r3n07: 
eval_delay=0, c06r3n07: eval_steps=100, c06r3n07: evaluation_strategy=steps, c06r3n07: fp16=True, c06r3n07: fp16_backend=auto, c06r3n07: fp16_full_eval=False, c06r3n07: fp16_opt_level=O1, c06r3n07: fsdp=[], c06r3n07: fsdp_config={'min_num_params': 0, 'xla': False, 'xla_fsdp_grad_ckpt': False}, c06r3n07: fsdp_min_num_params=0, c06r3n07: fsdp_transformer_layer_cls_to_wrap=None, c06r3n07: full_determinism=False, c06r3n07: generation_config=None, c06r3n07: generation_max_length=None, c06r3n07: generation_num_beams=None, c06r3n07: gradient_accumulation_steps=1, c06r3n07: gradient_checkpointing=False, c06r3n07: gradient_checkpointing_kwargs=None, c06r3n07: greater_is_better=False, c06r3n07: group_by_length=False, c06r3n07: half_precision_backend=auto, c06r3n07: hub_always_push=False, c06r3n07: hub_model_id=None, c06r3n07: hub_private_repo=False, c06r3n07: hub_strategy=every_save, c06r3n07: hub_token=, c06r3n07: ignore_data_skip=False, c06r3n07: include_inputs_for_metrics=False, c06r3n07: include_num_input_tokens_seen=False, c06r3n07: include_tokens_per_second=False, c06r3n07: jit_mode_eval=False, c06r3n07: label_names=None, c06r3n07: label_smoothing_factor=0.0, c06r3n07: learning_rate=5e-05, c06r3n07: length_column_name=length, c06r3n07: load_best_model_at_end=True, c06r3n07: local_rank=2, c06r3n07: log_level=passive, c06r3n07: log_level_replica=warning, c06r3n07: log_on_each_node=True, c06r3n07: logging_dir=/work/share/huchen1/liangjj/llama_factory/runs/Mar15_11-10-30_c06r3n07, c06r3n07: logging_first_step=False, c06r3n07: logging_nan_inf_filter=True, c06r3n07: logging_steps=10, c06r3n07: logging_strategy=steps, c06r3n07: lr_scheduler_kwargs={}, c06r3n07: lr_scheduler_type=cosine, c06r3n07: max_grad_norm=0.5, c06r3n07: max_steps=-1, c06r3n07: metric_for_best_model=loss, c06r3n07: mp_parameters=, c06r3n07: neftune_noise_alpha=None, c06r3n07: no_cuda=False, c06r3n07: num_train_epochs=4.0, c06r3n07: optim=adamw_torch, c06r3n07: optim_args=None, c06r3n07: output_dir=/work/share/huchen1/liangjj/llama_factory, c06r3n07: overwrite_output_dir=False, c06r3n07: past_index=-1, c06r3n07: per_device_eval_batch_size=1, c06r3n07: per_device_train_batch_size=1, c06r3n07: predict_with_generate=False, c06r3n07: prediction_loss_only=False, c06r3n07: push_to_hub=False, c06r3n07: push_to_hub_model_id=None, c06r3n07: push_to_hub_organization=None, c06r3n07: push_to_hub_token=, c06r3n07: ray_scope=last, c06r3n07: remove_unused_columns=True, c06r3n07: report_to=['tensorboard'], c06r3n07: resume_from_checkpoint=None, c06r3n07: run_name=/work/share/huchen1/liangjj/llama_factory, c06r3n07: save_on_each_node=True, c06r3n07: save_only_model=False, c06r3n07: save_safetensors=True, c06r3n07: save_steps=100, c06r3n07: save_strategy=steps, c06r3n07: save_total_limit=None, c06r3n07: seed=42, c06r3n07: skip_memory_metrics=True, c06r3n07: sortish_sampler=False, c06r3n07: split_batches=False, c06r3n07: tf32=None, c06r3n07: torch_compile=False, c06r3n07: torch_compile_backend=None, c06r3n07: torch_compile_mode=None, c06r3n07: torchdynamo=None, c06r3n07: tpu_metrics_debug=False, c06r3n07: tpu_num_cores=None, c06r3n07: use_cpu=False, c06r3n07: use_ipex=False, c06r3n07: use_legacy_prediction_loop=False, c06r3n07: use_mps_device=False, c06r3n07: warmup_ratio=0.03, c06r3n07: warmup_steps=0, c06r3n07: weight_decay=0.0, c06r3n07: ) c06r3n07: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1 c06r3n07: distributed training: True, compute dtype: torch.float16 c06r3n07: 03/15/2024 11:10:30 - INFO 
- llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( ... ) c06r3n07: (the same Seq2SeqTrainingArguments dump is printed by each of the remaining c06r3n07 ranks; only local_rank differs: 0, 3 and 1)
c06r3n07: 03/15/2024 11:10:30 - INFO - llmtuner.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1
c06r3n07: distributed training: True, compute dtype: torch.float16
c06r3n07: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,520 >> loading file tokenizer.model
c06r3n07: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,520 >> loading file added_tokens.json
c06r3n07: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,520 >> loading file special_tokens_map.json
c06r3n07: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,520 >> loading file tokenizer_config.json
c06r3n07: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:30,520 >> loading file tokenizer.json
c06r3n07: [WARNING|logging.py:329] 2024-03-15 11:10:30,523 >> You are using the default legacy behaviour of the . This is expected, and simply means that the `legacy` (previous) behavior will be used so nothing changes for you. If you want to use the new behaviour, set `legacy=False`. This should only be set if you understand what it means, and thoroughly read the reason why this was added as explained in https://github.com/huggingface/transformers/pull/24565 (the same warning is repeated by every rank on c06r3n06, c06r3n07, c06r3n08 and c06r3n09)
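The tokenizer warning kept above suggests its own remedy. As a hedged illustration (not something this run actually does), the non-legacy behaviour could be requested when loading the same snapshot; `legacy` is forwarded to the LLaMA tokenizer as described in the transformers PR linked in the warning:

# Hedged sketch: opting into the non-legacy tokenizer behaviour mentioned in the warning above.
# The path is the local snapshot used by this run; whether to switch is a separate decision.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "/work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b",
    legacy=False,  # silences the warning and uses the updated tokenization behaviour
)
print(tokenizer("Hello, world")["input_ids"])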
c06r3n09: [INFO|configuration_utils.py:727] 2024-03-15 11:10:30,638 >> loading configuration file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/config.json
c06r3n07: [INFO|configuration_utils.py:727] 2024-03-15 11:10:30,640 >> loading configuration file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/config.json
c06r3n09: [INFO|configuration_utils.py:792] 2024-03-15 11:10:30,640 >> Model config LlamaConfig {
c06r3n09:   "_name_or_path": "/work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b",
c06r3n09:   "architectures": [
c06r3n09:     "LlamaForCausalLM"
c06r3n09:   ],
c06r3n09:   "attention_bias": false,
c06r3n09:   "attention_dropout": 0.0,
c06r3n09:   "bos_token_id": 0,
c06r3n09:   "eos_token_id": 1,
c06r3n09:   "hidden_act": "silu",
c06r3n09:   "hidden_size": 4096,
c06r3n09:   "initializer_range": 0.02,
c06r3n09:   "intermediate_size": 11008,
c06r3n09:   "max_position_embeddings": 2048,
c06r3n09:   "max_sequence_length": 2048,
c06r3n09:   "model_type": "llama",
c06r3n09:   "num_attention_heads": 32,
c06r3n09:   "num_hidden_layers": 32,
c06r3n09:   "num_key_value_heads": 32,
c06r3n09:   "pad_token_id": -1,
c06r3n09:   "pretraining_tp": 1,
c06r3n09:   "rms_norm_eps": 1e-06,
c06r3n09:   "rope_scaling": null,
c06r3n09:   "rope_theta": 10000.0,
c06r3n09:   "tie_word_embeddings": false,
c06r3n09:   "torch_dtype": "float16",
c06r3n09:   "transformers_version": "4.37.2",
c06r3n09:   "use_cache": true,
c06r3n09:   "vocab_size": 32000
c06r3n09: }
c06r3n07: [INFO|configuration_utils.py:792] 2024-03-15 11:10:30,641 >> Model config LlamaConfig { ... } (identical to the c06r3n09 dump above)
c06r3n08: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n08: I0315 11:10:31.319677 10990 ProcessGroupNCCL.cpp:686] [Rank 8] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=264344064
c06r3n08: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n08: I0315 11:10:31.319717 10993 ProcessGroupNCCL.cpp:686] [Rank 11] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=267519072
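The LlamaConfig echoed above can be re-read straight from the snapshot on disk; a minimal sketch using the same path, with the field names taken from the dump:

# Hedged sketch: reload the configuration that produced the LlamaConfig dump above.
from transformers import AutoConfig

config = AutoConfig.from_pretrained(
    "/work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b"
)
# A few of the fields echoed in the log; the dump shows llama, 4096, 32, 32000.
print(config.model_type, config.hidden_size, config.num_hidden_layers, config.vocab_size)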
c06r3n06: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n06: I0315 11:10:31.328119 15480 ProcessGroupNCCL.cpp:686] [Rank 3] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=262204624
c06r3n06: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n06: I0315 11:10:31.328266 15478 ProcessGroupNCCL.cpp:686] [Rank 1] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=259081024
c06r3n06: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n06: I0315 11:10:31.328356 15479 ProcessGroupNCCL.cpp:686] [Rank 2] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=274692336
c06r3n06: WARNING: Logging before InitGoogleLogging() is written to STDERR
c06r3n06: I0315 11:10:31.328366 15477 ProcessGroupNCCL.cpp:686] [Rank 0] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=268340736
c06r3n08: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1
c06r3n08: distributed training: True, compute dtype: torch.float16
c06r3n08: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Process rank: 0, device: cuda:0, n_gpu: 1
c06r3n08: distributed training: True, compute dtype: torch.float16
c06r3n08: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( ... ) (same argument dump as on c06r3n07, printed once per local rank; only local_rank and logging_dir differ)
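Each rank echoes the same resolved Seq2SeqTrainingArguments. A hedged sketch of building an equivalent object from the values shown in the dump (only a subset of fields; the real run also passes --deepspeed deepspeed.json, omitted here so the snippet stands alone):

# Hedged sketch: a subset of the Seq2SeqTrainingArguments printed by every rank above.
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="/work/share/huchen1/liangjj/llama_factory",
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=1,
    learning_rate=5e-5,
    lr_scheduler_type="cosine",
    num_train_epochs=4.0,
    warmup_ratio=0.03,
    max_grad_norm=0.5,
    logging_steps=10,
    evaluation_strategy="steps",
    eval_steps=100,
    save_steps=100,
    load_best_model_at_end=True,
    fp16=True,  # assumes a CUDA device, as on these nodes (--fp16 in the launch command)
)
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)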
c06r3n08: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:31,371 >> loading file tokenizer.model (the remaining tokenizer files, the legacy warning and the LlamaConfig dump follow, identical to the c06r3n09/c06r3n07 output above)
c06r3n06: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Process rank: 0, device: cuda:0, n_gpu: 1
c06r3n06: distributed training: True, compute dtype: torch.float16
c06r3n06: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Process rank: 3, device: cuda:3, n_gpu: 1
c06r3n06: distributed training: True, compute dtype: torch.float16
c06r3n06: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Process rank: 1, device: cuda:1, n_gpu: 1
c06r3n06: distributed training: True, compute dtype: torch.float16
c06r3n06: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Process rank: 2, device: cuda:2, n_gpu: 1
c06r3n06: distributed training: True, compute dtype: torch.float16
c06r3n06: 03/15/2024 11:10:31 - INFO - llmtuner.hparams.parser - Training/evaluation parameters Seq2SeqTrainingArguments( ... ) (same argument dump, printed once per local rank on c06r3n06; only local_rank and logging_dir differ)
c06r3n06: [INFO|tokenization_utils_base.py:2025] 2024-03-15 11:10:31,496 >> loading file tokenizer.model (the remaining tokenizer files, the legacy warning and the LlamaConfig dump follow, identical to the output above)
c06r3n09: [INFO|modeling_utils.py:3473] 2024-03-15 11:10:31,941 >> loading weights file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/pytorch_model.bin.index.json
c06r3n07: [INFO|modeling_utils.py:3473] 2024-03-15 11:10:31,943 >> loading weights file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/pytorch_model.bin.index.json
c06r3n08: [INFO|modeling_utils.py:3473] 2024-03-15 11:10:31,944 >> loading weights file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/pytorch_model.bin.index.json
c06r3n09: [INFO|modeling_utils.py:1426] 2024-03-15 11:10:31,963 >> Instantiating LlamaForCausalLM model under default dtype torch.float16.
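The "loading weights file" and "Instantiating LlamaForCausalLM model under default dtype torch.float16" lines correspond to a plain fp16 load of the same snapshot. A hedged single-process sketch, without the DeepSpeed zero.init() wrapping reported just below, so it needs enough host memory on its own:

# Hedged sketch: load the same checkpoint in fp16 on one process.
# Under the deepspeed launcher used in this run, transformers performs this load
# inside zero.init(); this standalone version is only illustrative.
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "/work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b",
    torch_dtype=torch.float16,
)
print(sum(p.numel() for p in model.parameters()))  # about 6.74e9, matching the log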
c06r3n07: [INFO|modeling_utils.py:1426] 2024-03-15 11:10:31,964 >> Instantiating LlamaForCausalLM model under default dtype torch.float16.
c06r3n09: [INFO|modeling_utils.py:3582] 2024-03-15 11:10:31,963 >> Detected DeepSpeed ZeRO-3: activating zero.init() for this model
c06r3n08: [INFO|modeling_utils.py:1426] 2024-03-15 11:10:31,963 >> Instantiating LlamaForCausalLM model under default dtype torch.float16.
c06r3n07: [INFO|modeling_utils.py:3582] 2024-03-15 11:10:31,964 >> Detected DeepSpeed ZeRO-3: activating zero.init() for this model
c06r3n08: [INFO|modeling_utils.py:3582] 2024-03-15 11:10:31,964 >> Detected DeepSpeed ZeRO-3: activating zero.init() for this model
c06r3n06: [INFO|modeling_utils.py:3473] 2024-03-15 11:10:31,966 >> loading weights file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/pytorch_model.bin.index.json
c06r3n06: [INFO|modeling_utils.py:1426] 2024-03-15 11:10:31,967 >> Instantiating LlamaForCausalLM model under default dtype torch.float16.
c06r3n06: [INFO|modeling_utils.py:3582] 2024-03-15 11:10:31,967 >> Detected DeepSpeed ZeRO-3: activating zero.init() for this model
c06r3n07: [INFO|configuration_utils.py:826] 2024-03-15 11:10:31,972 >> Generate config GenerationConfig {
c06r3n07:   "bos_token_id": 0,
c06r3n07:   "eos_token_id": 1,
c06r3n07:   "pad_token_id": -1
c06r3n07: }
c06r3n08: [INFO|configuration_utils.py:826] 2024-03-15 11:10:31,972 >> Generate config GenerationConfig { ... } (identical dumps also printed by c06r3n09 at 11:10:31,973 and c06r3n06 at 11:10:31,975)
c06r3n06: pthread_mutex_timedlock() returned 110
c06r3n06: Failed to initialize RSMI device mutex after 5 seconds. Previous execution may not have shutdown cleanly. To fix problem, stop all rocm_smi programs, and then delete the rocm_smi* shared memory files in /dev/shm.: Success (this pair of messages is printed four times on c06r3n06)
c06r3n06: I0315 11:10:38.707123 15477 ProcessGroupNCCL.cpp:1340] NCCL_DEBUG: N/A
c06r3n06: [2024-03-15 11:10:43,151] [INFO] [partition_parameters.py:348:__exit__] finished initializing model - num_params = 291, num_elems = 6.74B
c06r3n08: Loading checkpoint shards: 0%| | 0/33
>> All model checkpoint weights were used when initializing LlamaForCausalLM.
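The "Detected DeepSpeed ZeRO-3: activating zero.init() for this model" lines above imply that deepspeed.json requests ZeRO stage 3. The file itself is not reproduced in this log, so the following is only an assumed minimal sketch of a stage-3 config that would trigger that message, with batch and precision fields deferred to the HF integration via "auto":

# Hedged sketch (assumption): a minimal ZeRO stage-3 deepspeed.json consistent with the
# zero.init() messages above. The actual file used by this run is not shown in the log.
import json

ds_config = {
    "train_micro_batch_size_per_gpu": "auto",
    "gradient_accumulation_steps": "auto",
    "fp16": {"enabled": "auto"},
    "zero_optimization": {
        "stage": 3,
        "overlap_comm": True,
        "stage3_gather_16bit_weights_on_model_save": True,
    },
}

with open("deepspeed.json", "w") as f:
    json.dump(ds_config, f, indent=2)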
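The repeated rocm_smi mutex timeout on c06r3n06 states its own fix: stop all rocm_smi programs, then remove the stale shared-memory files. A hedged sketch of that cleanup, to be run on the affected node only after the rocm_smi processes are stopped; the glob pattern is the one named in the message:

# Hedged sketch of the cleanup suggested by the RSMI error above.
import glob
import os

for path in glob.glob("/dev/shm/rocm_smi*"):
    print("removing", path)
    os.remove(path)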
c06r3n09: [INFO|modeling_utils.py:4350] 2024-03-15 11:11:10,242 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
c06r3n09:
c06r3n09: [INFO|modeling_utils.py:4358] 2024-03-15 11:11:10,242 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b.
c06r3n09: If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
c06r3n09: Loading checkpoint shards: 100%|██████████| 33/33 [00:27<00:00, 1.22it/s]
c06r3n07: Loading checkpoint shards: 100%|██████████| 33/33 [00:27<00:00, 1.22it/s]
c06r3n06: Loading checkpoint shards: 100%|██████████| 33/33 [00:27<00:00, 1.22it/s]
c06r3n08: Loading checkpoint shards: 100%|██████████| 33/33 [00:27<00:00, 1.22it/s]
[interleaved tqdm progress output from all 16 ranks omitted; every rank loaded the 33 checkpoint shards in about 27 s at roughly 1.1-1.4 it/s]
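Up to this point the log shows the standard transformers + DeepSpeed ZeRO-3 load path: the fp16 LlamaForCausalLM skeleton is built under zero.init(), the 33 shards referenced by pytorch_model.bin.index.json are streamed in, and each rank ends up holding only its partition of the 291 parameter tensors (6.74B elements in total). A rough sketch of how that load is typically driven; in the actual run the Trainer wires this up from the DeepSpeed config passed on the command line, the import location varies with the transformers version, and the config file name below is a placeholder:

    import json
    import torch
    from transformers import AutoModelForCausalLM
    from transformers.integrations import HfDeepSpeedConfig  # older versions expose this under transformers.deepspeed

    # Placeholder name for the ZeRO stage-3 JSON config used by the run.
    with open("ds_config.json") as f:
        ds_config = json.load(f)

    # Keeping a live HfDeepSpeedConfig with ZeRO stage 3 is what makes
    # from_pretrained() wrap model construction in deepspeed.zero.Init(),
    # which is the "Detected DeepSpeed ZeRO-3: activating zero.init()"
    # message seen above.
    dschf = HfDeepSpeedConfig(ds_config)  # must stay referenced while the model is built

    model = AutoModelForCausalLM.from_pretrained(
        "/work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b",
        torch_dtype=torch.float16,  # matches "default dtype torch.float16" in the log
    )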
c06r3n07: [INFO|modeling_utils.py:4350] 2024-03-15 11:11:10,258 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
c06r3n07:
c06r3n07: [INFO|modeling_utils.py:4358] 2024-03-15 11:11:10,258 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b.
c06r3n07: If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
c06r3n08: [INFO|modeling_utils.py:4350] 2024-03-15 11:11:10,264 >> All model checkpoint weights were used when initializing LlamaForCausalLM.
c06r3n08:
c06r3n08: [INFO|modeling_utils.py:4358] 2024-03-15 11:11:10,264 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b.
c06r3n08: If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training.
c06r3n09: [INFO|configuration_utils.py:779] 2024-03-15 11:11:10,257 >> loading configuration file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/generation_config.json
c06r3n09: [INFO|configuration_utils.py:826] 2024-03-15 11:11:10,258 >> Generate config GenerationConfig {
c06r3n09:   "bos_token_id": 0,
c06r3n09:   "eos_token_id": 1,
c06r3n09:   "pad_token_id": 0
c06r3n09: }
[c06r3n07 (11:11:10,263) and c06r3n08 (11:11:10,269) load the same generation_config.json and print the identical GenerationConfig]
c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
c06r3n07: 03/15/2024 11:11:10 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
c06r3n07: 03/15/2024 11:11:10 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
c06r3n08: 03/15/2024 11:11:10 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
c06r3n08: 03/15/2024 11:11:10 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
c06r3n09: 03/15/2024 11:11:10 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled.
c06r3n09: 03/15/2024 11:11:10 - INFO - llmtuner.model.adapter - Fine-tuning method: Full
[the patcher/adapter pair is printed by every local rank on each node; the remaining copies and duplicate "Loading checkpoint shards: 100%" lines are omitted]
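The llmtuner.model.patcher lines record that activation (gradient) checkpointing was switched on before full fine-tuning. Continuing with the model object from the from_pretrained sketch above, a minimal version of that step with the stock transformers API (the real patcher may differ in detail):

    # Enable activation (gradient) checkpointing: recompute activations in the
    # backward pass instead of storing them, which sharply reduces activation
    # memory for a 7B full fine-tune at the cost of extra compute.
    model.gradient_checkpointing_enable()

    # The KV cache only helps generation and conflicts with checkpointed
    # training, so it is normally disabled here as well.
    model.config.use_cache = False

    # Make sure inputs require grads so checkpointed blocks still receive
    # gradients when the embedding layer is the first checkpointed module.
    model.enable_input_require_grads()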
c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.model.loader - trainable params: 6738415616 || all params: 6738415616 || trainable%: 100.0000
c06r3n07: 03/15/2024 11:11:10 - INFO - llmtuner.model.loader - trainable params: 6738415616 || all params: 6738415616 || trainable%: 100.0000
c06r3n08: 03/15/2024 11:11:10 - INFO - llmtuner.model.loader - trainable params: 6738415616 || all params: 6738415616 || trainable%: 100.0000
c06r3n09: 03/15/2024 11:11:10 - INFO - llmtuner.model.loader - trainable params: 6738415616 || all params: 6738415616 || trainable%: 100.0000
c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.data.template - Add pad token:
c06r3n07: 03/15/2024 11:11:10 - INFO - llmtuner.data.template - Add pad token:
c06r3n08: 03/15/2024 11:11:10 - INFO - llmtuner.data.template - Add pad token:
c06r3n09: 03/15/2024 11:11:10 - INFO - llmtuner.data.template - Add pad token:
[both the loader and the template message are printed once per local rank on every node; the extra copies are omitted]
c06r3n08: 03/15/2024 11:11:10 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json...
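Every rank reports trainable params: 6738415616 || all params: 6738415616 || trainable%: 100.0000, i.e. all 6.74B LLaMA-7B weights are being updated, as expected for full fine-tuning. A plain-PyTorch sketch of how such a summary can be produced, reusing the model from the sketches above (illustrative only; under ZeRO-3 partitioning a rank would need the DeepSpeed-reported full sizes rather than numel()):

    def count_parameters(model):
        """Return (trainable, total) parameter counts for a torch.nn.Module."""
        trainable, total = 0, 0
        for param in model.parameters():
            # NOTE: under DeepSpeed ZeRO-3 the tensor is partitioned and
            # param.numel() can be 0 on a given rank; frameworks typically
            # fall back to the DeepSpeed-provided full size instead.
            n = param.numel()
            total += n
            if param.requires_grad:
                trainable += n
        return trainable, total

    trainable, total = count_parameters(model)
    print(f"trainable params: {trainable} || all params: {total} || trainable%: {100 * trainable / total:.4f}")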
c06r3n09: 03/15/2024 11:11:10 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json... c06r3n07: 03/15/2024 11:11:10 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json... c06r3n06: Loading checkpoint shards: 100%|██████████| 33/33 [00:27<00:00, 1.28it/s] Loading checkpoint shards: 100%|██████████| 33/33 [00:27<00:00, 1.22it/s] c06r3n06: [INFO|modeling_utils.py:4350] 2024-03-15 11:11:10,350 >> All model checkpoint weights were used when initializing LlamaForCausalLM. c06r3n06: c06r3n06: [INFO|modeling_utils.py:4358] 2024-03-15 11:11:10,350 >> All the weights of LlamaForCausalLM were initialized from the model checkpoint at /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b. c06r3n06: If your task is similar to the task the model of the checkpoint was trained on, you can already use LlamaForCausalLM for predictions without further training. c06r3n06: [INFO|configuration_utils.py:779] 2024-03-15 11:11:10,357 >> loading configuration file /work/home/liangjing/.cache/modelscope/hub/skyline2006/llama-7b/generation_config.json c06r3n06: [INFO|configuration_utils.py:826] 2024-03-15 11:11:10,357 >> Generate config GenerationConfig { c06r3n06: "bos_token_id": 0, c06r3n06: "eos_token_id": 1, c06r3n06: "pad_token_id": 0 c06r3n06: } c06r3n06: c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.model.patcher - Gradient checkpointing enabled. c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.model.adapter - Fine-tuning method: Full c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.model.loader - trainable params: 6738415616 || all params: 6738415616 || trainable%: 100.0000 c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.data.template - Add pad token: c06r3n06: 03/15/2024 11:11:10 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json... c06r3n08: Using custom data configuration default-c71a5e5c5041e81e c06r3n08: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n06: Using custom data configuration default-c71a5e5c5041e81e c06r3n07: Using custom data configuration default-c71a5e5c5041e81e c06r3n06: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n07: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n08: Overwrite dataset info from restored data version if exists. c06r3n09: Using custom data configuration default-c71a5e5c5041e81e c06r3n09: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n08: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n07: Overwrite dataset info from restored data version if exists. c06r3n07: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n06: Overwrite dataset info from restored data version if exists. c06r3n06: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n09: Overwrite dataset info from restored data version if exists. 
c06r3n09: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n08: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n08: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n06: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n06: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n09: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n09: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n07: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n07: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n09: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00000_of_00002.arrow c06r3n09: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00001_of_00002.arrow c06r3n07: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00000_of_00002.arrow c06r3n07: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00001_of_00002.arrow c06r3n06: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00000_of_00002.arrow c06r3n06: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00001_of_00002.arrow c06r3n08: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00000_of_00002.arrow c06r3n08: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_00001_of_00002.arrow c06r3n09: Loading cached processed 
dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_*_of_00002.arrow c06r3n09: Concatenating 2 shards c06r3n08: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_*_of_00002.arrow c06r3n07: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_*_of_00002.arrow c06r3n06: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-f55b5a094672e9db_*_of_00002.arrow c06r3n06: Concatenating 2 shards c06r3n07: Concatenating 2 shards c06r3n08: Concatenating 2 shards c06r3n09: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json... c06r3n07: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json... c06r3n08: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json... c06r3n06: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json... c06r3n09: Using custom data configuration default-ea5892bdcb099afd c06r3n09: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n08: Using custom data configuration default-ea5892bdcb099afd c06r3n07: Using custom data configuration default-ea5892bdcb099afd c06r3n07: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n08: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n06: Using custom data configuration default-ea5892bdcb099afd c06r3n06: Loading Dataset Infos from /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/datasets/packaged_modules/json c06r3n07: Overwrite dataset info from restored data version if exists. c06r3n07: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n07: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n07: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n08: Overwrite dataset info from restored data version if exists. 
c06r3n08: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n08: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n08: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n06: Overwrite dataset info from restored data version if exists. c06r3n06: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n08: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00000_of_00002.arrow c06r3n08: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00001_of_00002.arrow c06r3n06: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96) c06r3n06: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96 c06r3n07: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00000_of_00002.arrow c06r3n07: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00001_of_00002.arrow c06r3n06: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00000_of_00002.arrow c06r3n06: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00001_of_00002.arrow c06r3n09: Overwrite dataset info from restored data version if exists. 
c06r3n09: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96
c06r3n09: Found cached dataset json (/work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96)
c06r3n09: Loading Dataset info from /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96
c06r3n08: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_*_of_00002.arrow
c06r3n08: Concatenating 2 shards
c06r3n07: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_*_of_00002.arrow
c06r3n07: Concatenating 2 shards
c06r3n06: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_*_of_00002.arrow
c06r3n06: Concatenating 2 shards
c06r3n09: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00000_of_00002.arrow
c06r3n09: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_00001_of_00002.arrow
c06r3n09: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-ea5892bdcb099afd/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-4686cdfd54872145_*_of_00002.arrow
c06r3n09: Concatenating 2 shards
c06r3n06: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json...
c06r3n07: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json...
c06r3n08: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json...
c06r3n09: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_en.json...
[the same data.loader message is printed by the remaining ranks on every node; the extra copies are omitted]
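The "Using custom data configuration default-…" / "Found cached dataset json" messages come from the HuggingFace datasets json builder: the configuration fingerprint is derived from the data files, so a repeated run reuses the Arrow cache under ~/.cache/huggingface/datasets instead of re-parsing the JSON. A small sketch of that load (the file path is an assumption; LLaMA-Factory resolves the real path from its dataset registry):

    from datasets import load_dataset

    # Load a local alpaca-style JSON file through the generic "json" builder.
    # The builder hashes the data files into a config fingerprint, which is
    # why the second and later launches hit the cache shown in the log.
    raw = load_dataset(
        "json",
        data_files="data/alpaca_gpt4_data_en.json",  # assumed location
        split="train",
    )
    print(raw)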
c06r3n07: Process #0 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-56e79f9679a95e8c_00000_of_00002.arrow
c06r3n07: Process #1 will write at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-56e79f9679a95e8c_00001_of_00002.arrow
[c06r3n08, c06r3n09 and c06r3n06 print the same pair of Process #0/#1 cache paths]
c06r3n09: 03/15/2024 11:11:12 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json...
c06r3n06: 03/15/2024 11:11:13 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json...
c06r3n07: 03/15/2024 11:11:13 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json...
c06r3n08: 03/15/2024 11:11:13 - INFO - llmtuner.data.loader - Loading dataset alpaca_gpt4_data_zh.json...
[the same message is repeated by the remaining ranks on every node; the extra copies are omitted]
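The "Process #0/#1 will write at …_00000_of_00002.arrow" and "Concatenating 2 shards" lines are what datasets prints when a map() runs with two worker processes, matching the two preprocessing workers configured for this run: the table is split in two, each worker tokenises its shard and writes its own cache file, and the shards are concatenated afterwards. A hedged sketch of that stage (preprocess_fn and the file paths are stand-ins, not the actual LLaMA-Factory code):

    from datasets import concatenate_datasets, load_dataset

    def preprocess_fn(examples):
        # Stand-in for the real prompt/response tokenisation, which builds
        # input_ids/labels like the example dump printed further below.
        examples["n_chars"] = [len(t) for t in examples["instruction"]]
        return examples

    en = load_dataset("json", data_files="data/alpaca_gpt4_data_en.json", split="train")  # assumed paths
    zh = load_dataset("json", data_files="data/alpaca_gpt4_data_zh.json", split="train")

    # num_proc=2 is what produces the cache-XXXX_0000N_of_00002.arrow shards
    # and the "Concatenating 2 shards" message in the log.
    en = en.map(preprocess_fn, batched=True, num_proc=2)
    zh = zh.map(preprocess_fn, batched=True, num_proc=2)

    # The two dataset entries are merged into one training set; plain
    # concatenation is shown here as one common choice.
    train_data = concatenate_datasets([en, zh])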
c06r3n07: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-56e79f9679a95e8c_*_of_00002.arrow c06r3n07: Concatenating 2 shards c06r3n06: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-56e79f9679a95e8c_*_of_00002.arrow c06r3n06: Concatenating 2 shards c06r3n08: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-56e79f9679a95e8c_*_of_00002.arrow c06r3n08: Concatenating 2 shards c06r3n06: input_ids: c06r3n06: [0, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29954, 573, 2211, 25562, 363, 7952, 292, 9045, 29891, 29889, 518, 29914, 25580, 29962, 29871, 29896, 29889, 382, 271, 263, 6411, 8362, 322, 18254, 768, 2738, 652, 300, 29901, 8561, 1854, 596, 592, 1338, 526, 20978, 573, 310, 263, 12875, 310, 285, 21211, 322, 18655, 1849, 29892, 20793, 26823, 29892, 3353, 2646, 1144, 29892, 322, 9045, 29891, 285, 1446, 29889, 910, 6911, 304, 3867, 596, 3573, 411, 278, 18853, 18254, 374, 1237, 304, 740, 472, 967, 1900, 322, 508, 1371, 5557, 17168, 293, 10267, 2129, 29889, 13, 13, 29906, 29889, 2201, 482, 297, 4943, 9128, 6354, 29901, 1222, 6269, 895, 338, 7618, 1455, 363, 7344, 292, 4549, 289, 2873, 29892, 2301, 7799, 29892, 322, 5881, 29875, 586, 6151, 1070, 9045, 29889, 319, 326, 363, 472, 3203, 29871, 29896, 29945, 29900, 6233, 310, 17768, 403, 14911, 711, 293, 15058, 470, 29871, 29955, 29945, 6233, 310, 14877, 20657, 15058, 1269, 4723, 29889, 13, 13, 29941, 29889, 3617, 3307, 8709, 29901, 24162, 3307, 11029, 8709, 338, 7618, 1455, 363, 9128, 322, 19119, 1532, 29899, 915, 292, 29889, 739, 6911, 304, 1072, 5987, 286, 2092, 29892, 11157, 25323, 3321, 740, 29892, 322, 11286, 9045, 29891, 14321, 322, 5198, 1540, 740, 29889, 319, 326, 363, 29871, 29955, 29899, 29929, 6199, 310, 8709, 1269, 4646, 29889, 0] c06r3n07: input_ids: c06r3n07: [0, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 
1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29954, 573, 2211, 25562, 363, 7952, 292, 9045, 29891, 29889, 518, 29914, 25580, 29962, 29871, 29896, 29889, 382, 271, 263, 6411, 8362, 322, 18254, 768, 2738, 652, 300, 29901, 8561, 1854, 596, 592, 1338, 526, 20978, 573, 310, 263, 12875, 310, 285, 21211, 322, 18655, 1849, 29892, 20793, 26823, 29892, 3353, 2646, 1144, 29892, 322, 9045, 29891, 285, 1446, 29889, 910, 6911, 304, 3867, 596, 3573, 411, 278, 18853, 18254, 374, 1237, 304, 740, 472, 967, 1900, 322, 508, 1371, 5557, 17168, 293, 10267, 2129, 29889, 13, 13, 29906, 29889, 2201, 482, 297, 4943, 9128, 6354, 29901, 1222, 6269, 895, 338, 7618, 1455, 363, 7344, 292, 4549, 289, 2873, 29892, 2301, 7799, 29892, 322, 5881, 29875, 586, 6151, 1070, 9045, 29889, 319, 326, 363, 472, 3203, 29871, 29896, 29945, 29900, 6233, 310, 17768, 403, 14911, 711, 293, 15058, 470, 29871, 29955, 29945, 6233, 310, 14877, 20657, 15058, 1269, 4723, 29889, 13, 13, 29941, 29889, 3617, 3307, 8709, 29901, 24162, 3307, 11029, 8709, 338, 7618, 1455, 363, 9128, 322, 19119, 1532, 29899, 915, 292, 29889, 739, 6911, 304, 1072, 5987, 286, 2092, 29892, 11157, 25323, 3321, 740, 29892, 322, 11286, 9045, 29891, 14321, 322, 5198, 1540, 740, 29889, 319, 326, 363, 29871, 29955, 29899, 29929, 6199, 310, 8709, 1269, 4646, 29889, 0] c06r3n06: inputs: c06r3n06: ⁇ [INST] <> c06r3n06: You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. c06r3n06: c06r3n06: If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. c06r3n06: <> c06r3n06: c06r3n06: Give three tips for staying healthy. [/INST] 1. Eat a balanced and nutritious diet: Make sure your meals are inclusive of a variety of fruits and vegetables, lean protein, whole grains, and healthy fats. This helps to provide your body with the essential nutrients to function at its best and can help prevent chronic diseases. c06r3n06: c06r3n06: 2. Engage in regular physical activity: Exercise is crucial for maintaining strong bones, muscles, and cardiovascular health. Aim for at least 150 minutes of moderate aerobic exercise or 75 minutes of vigorous exercise each week. c06r3n06: c06r3n06: 3. Get enough sleep: Getting enough quality sleep is crucial for physical and mental well-being. It helps to regulate mood, improve cognitive function, and supports healthy growth and immune function. Aim for 7-9 hours of sleep each night. ⁇ c06r3n07: inputs: c06r3n06: label_ids: c06r3n07: ⁇ [INST] <> c06r3n07: You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. c06r3n07: c06r3n07: If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. c06r3n07: <> c06r3n07: c06r3n07: Give three tips for staying healthy. [/INST] 1. 
Eat a balanced and nutritious diet: Make sure your meals are inclusive of a variety of fruits and vegetables, lean protein, whole grains, and healthy fats. This helps to provide your body with the essential nutrients to function at its best and can help prevent chronic diseases. c06r3n07: c06r3n07: 2. Engage in regular physical activity: Exercise is crucial for maintaining strong bones, muscles, and cardiovascular health. Aim for at least 150 minutes of moderate aerobic exercise or 75 minutes of vigorous exercise each week. c06r3n07: c06r3n07: 3. Get enough sleep: Getting enough quality sleep is crucial for physical and mental well-being. It helps to regulate mood, improve cognitive function, and supports healthy growth and immune function. Aim for 7-9 hours of sleep each night. ⁇ c06r3n07: label_ids: c06r3n06: [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 29871, 29896, 29889, 382, 271, 263, 6411, 8362, 322, 18254, 768, 2738, 652, 300, 29901, 8561, 1854, 596, 592, 1338, 526, 20978, 573, 310, 263, 12875, 310, 285, 21211, 322, 18655, 1849, 29892, 20793, 26823, 29892, 3353, 2646, 1144, 29892, 322, 9045, 29891, 285, 1446, 29889, 910, 6911, 304, 3867, 596, 3573, 411, 278, 18853, 18254, 374, 1237, 304, 740, 472, 967, 1900, 322, 508, 1371, 5557, 17168, 293, 10267, 2129, 29889, 13, 13, 29906, 29889, 2201, 482, 297, 4943, 9128, 6354, 29901, 1222, 6269, 895, 338, 7618, 1455, 363, 7344, 292, 4549, 289, 2873, 29892, 2301, 7799, 29892, 322, 5881, 29875, 586, 6151, 1070, 9045, 29889, 319, 326, 363, 472, 3203, 29871, 29896, 29945, 29900, 6233, 310, 17768, 403, 14911, 711, 293, 15058, 470, 29871, 29955, 29945, 6233, 310, 14877, 20657, 15058, 1269, 4723, 29889, 13, 13, 29941, 29889, 3617, 3307, 8709, 29901, 24162, 3307, 11029, 8709, 338, 7618, 1455, 363, 9128, 322, 19119, 1532, 29899, 915, 292, 29889, 739, 6911, 304, 1072, 5987, 286, 2092, 29892, 11157, 25323, 3321, 740, 29892, 322, 11286, 9045, 29891, 14321, 322, 5198, 1540, 740, 29889, 319, 326, 363, 29871, 29955, 29899, 29929, 6199, 310, 8709, 1269, 4646, 29889, 0] c06r3n07: [-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 
-100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, -100, 29871, 29896, 29889, 382, 271, 263, 6411, 8362, 322, 18254, 768, 2738, 652, 300, 29901, 8561, 1854, 596, 592, 1338, 526, 20978, 573, 310, 263, 12875, 310, 285, 21211, 322, 18655, 1849, 29892, 20793, 26823, 29892, 3353, 2646, 1144, 29892, 322, 9045, 29891, 285, 1446, 29889, 910, 6911, 304, 3867, 596, 3573, 411, 278, 18853, 18254, 374, 1237, 304, 740, 472, 967, 1900, 322, 508, 1371, 5557, 17168, 293, 10267, 2129, 29889, 13, 13, 29906, 29889, 2201, 482, 297, 4943, 9128, 6354, 29901, 1222, 6269, 895, 338, 7618, 1455, 363, 7344, 292, 4549, 289, 2873, 29892, 2301, 7799, 29892, 322, 5881, 29875, 586, 6151, 1070, 9045, 29889, 319, 326, 363, 472, 3203, 29871, 29896, 29945, 29900, 6233, 310, 17768, 403, 14911, 711, 293, 15058, 470, 29871, 29955, 29945, 6233, 310, 14877, 20657, 15058, 1269, 4723, 29889, 13, 13, 29941, 29889, 3617, 3307, 8709, 29901, 24162, 3307, 11029, 8709, 338, 7618, 1455, 363, 9128, 322, 19119, 1532, 29899, 915, 292, 29889, 739, 6911, 304, 1072, 5987, 286, 2092, 29892, 11157, 25323, 3321, 740, 29892, 322, 11286, 9045, 29891, 14321, 322, 5198, 1540, 740, 29889, 319, 326, 363, 29871, 29955, 29899, 29929, 6199, 310, 8709, 1269, 4646, 29889, 0] c06r3n08: input_ids: c06r3n08: [0, 518, 25580, 29962, 3532, 14816, 29903, 6778, 13, 3492, 526, 263, 8444, 29892, 3390, 1319, 322, 15993, 20255, 29889, 29849, 1234, 408, 1371, 3730, 408, 1950, 29892, 1550, 1641, 9109, 29889, 3575, 6089, 881, 451, 3160, 738, 10311, 1319, 29892, 443, 621, 936, 29892, 11021, 391, 29892, 7916, 391, 29892, 304, 27375, 29892, 18215, 29892, 470, 27302, 2793, 29889, 3529, 9801, 393, 596, 20890, 526, 5374, 635, 443, 5365, 1463, 322, 6374, 297, 5469, 29889, 13, 13, 3644, 263, 1139, 947, 451, 1207, 738, 4060, 29892, 470, 338, 451, 2114, 1474, 16165, 261, 296, 29892, 5649, 2020, 2012, 310, 22862, 1554, 451, 1959, 29889, 960, 366, 1016, 29915, 29873, 1073, 278, 1234, 304, 263, 1139, 29892, 3113, 1016, 29915, 29873, 6232, 2089, 2472, 29889, 13, 29966, 829, 14816, 29903, 6778, 13, 13, 29954, 573, 2211, 25562, 363, 7952, 292, 9045, 29891, 29889, 518, 29914, 25580, 29962, 29871, 29896, 29889, 382, 271, 263, 6411, 8362, 322, 18254, 768, 2738, 652, 300, 29901, 8561, 1854, 596, 592, 1338, 526, 20978, 573, 310, 263, 12875, 310, 285, 21211, 322, 18655, 1849, 29892, 20793, 26823, 29892, 3353, 2646, 1144, 29892, 322, 9045, 29891, 285, 1446, 29889, 910, 6911, 304, 3867, 596, 3573, 411, 278, 18853, 18254, 374, 1237, 304, 740, 472, 967, 1900, 322, 508, 1371, 5557, 17168, 293, 10267, 2129, 29889, 13, 13, 29906, 29889, 2201, 482, 297, 4943, 9128, 6354, 29901, 1222, 6269, 895, 338, 7618, 1455, 363, 7344, 292, 4549, 289, 2873, 29892, 2301, 7799, 29892, 322, 5881, 29875, 586, 6151, 1070, 9045, 29889, 319, 326, 363, 472, 3203, 29871, 29896, 29945, 29900, 6233, 310, 17768, 403, 14911, 711, 293, 15058, 470, 29871, 29955, 29945, 6233, 310, 14877, 20657, 15058, 1269, 4723, 29889, 13, 13, 29941, 29889, 3617, 3307, 8709, 29901, 24162, 3307, 11029, 8709, 338, 7618, 1455, 363, 9128, 322, 19119, 1532, 29899, 915, 292, 29889, 739, 6911, 304, 1072, 5987, 286, 2092, 29892, 11157, 25323, 3321, 740, 29892, 322, 11286, 9045, 29891, 14321, 322, 5198, 1540, 740, 29889, 319, 326, 363, 29871, 29955, 29899, 29929, 6199, 310, 8709, 1269, 4646, 29889, 0] c06r3n07: labels: c06r3n07: 1. 
Eat a balanced and nutritious diet: Make sure your meals are inclusive of a variety of fruits and vegetables, lean protein, whole grains, and healthy fats. This helps to provide your body with the essential nutrients to function at its best and can help prevent chronic diseases. c06r3n07: c06r3n07: 2. Engage in regular physical activity: Exercise is crucial for maintaining strong bones, muscles, and cardiovascular health. Aim for at least 150 minutes of moderate aerobic exercise or 75 minutes of vigorous exercise each week. c06r3n07: c06r3n07: 3. Get enough sleep: Getting enough quality sleep is crucial for physical and mental well-being. It helps to regulate mood, improve cognitive function, and supports healthy growth and immune function. Aim for 7-9 hours of sleep each night. ⁇ c06r3n06: labels: c06r3n06: 1. Eat a balanced and nutritious diet: Make sure your meals are inclusive of a variety of fruits and vegetables, lean protein, whole grains, and healthy fats. This helps to provide your body with the essential nutrients to function at its best and can help prevent chronic diseases. c06r3n06: c06r3n06: 2. Engage in regular physical activity: Exercise is crucial for maintaining strong bones, muscles, and cardiovascular health. Aim for at least 150 minutes of moderate aerobic exercise or 75 minutes of vigorous exercise each week. c06r3n06: c06r3n06: 3. Get enough sleep: Getting enough quality sleep is crucial for physical and mental well-being. It helps to regulate mood, improve cognitive function, and supports healthy growth and immune function. Aim for 7-9 hours of sleep each night. ⁇ c06r3n09: Loading cached processed dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-56e79f9679a95e8c_*_of_00002.arrow c06r3n09: Concatenating 2 shards c06r3n08: inputs: c06r3n08: ⁇ [INST] <> c06r3n08: You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. c06r3n08: c06r3n08: If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information. c06r3n08: <> c06r3n08: c06r3n08: Give three tips for staying healthy. [/INST] 1. Eat a balanced and nutritious diet: Make sure your meals are inclusive of a variety of fruits and vegetables, lean protein, whole grains, and healthy fats. This helps to provide your body with the essential nutrients to function at its best and can help prevent chronic diseases. c06r3n08: c06r3n08: 2. Engage in regular physical activity: Exercise is crucial for maintaining strong bones, muscles, and cardiovascular health. Aim for at least 150 minutes of moderate aerobic exercise or 75 minutes of vigorous exercise each week. c06r3n08: c06r3n08: 3. Get enough sleep: Getting enough quality sleep is crucial for physical and mental well-being. It helps to regulate mood, improve cognitive function, and supports healthy growth and immune function. Aim for 7-9 hours of sleep each night. 
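The input_ids / label_ids pairs dumped above show how each example is prepared for supervised fine-tuning: every position that belongs to the prompt (the system text plus the instruction) carries the label -100, which the cross-entropy loss ignores, while the assistant response and the closing special token keep their real token ids as labels. The ⁇ markers and the trailing 0 appear to be how this checkpoint's tokenizer renders its special tokens when the example is decoded for display. Below is a minimal sketch of that masking with made-up token ids and a hypothetical build_labels helper; it is not the project's actual preprocessing code.

IGNORE_INDEX = -100  # positions with this label are skipped by the loss

def build_labels(prompt_ids, response_ids, eos_id):
    # The model sees the full sequence, but only the response (plus EOS) is supervised.
    input_ids = prompt_ids + response_ids + [eos_id]
    label_ids = [IGNORE_INDEX] * len(prompt_ids) + response_ids + [eos_id]
    return input_ids, label_ids

# Toy ids for illustration only; the dumps above use the real Llama tokenizer ids.
inp, lab = build_labels([1, 518, 25580, 29962], [29871, 29896, 29889], eos_id=2)
print(inp)  # [1, 518, 25580, 29962, 29871, 29896, 29889, 2]
print(lab)  # [-100, -100, -100, -100, 29871, 29896, 29889, 2]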
⁇ c06r3n07: [INFO|training_args.py:1828] 2024-03-15 11:11:14,578 >> PyTorch: setting up devices c06r3n08: [INFO|training_args.py:1828] 2024-03-15 11:11:14,577 >> PyTorch: setting up devices c06r3n06: [INFO|training_args.py:1828] 2024-03-15 11:11:14,578 >> PyTorch: setting up devices c06r3n09: [INFO|training_args.py:1828] 2024-03-15 11:11:14,581 >> PyTorch: setting up devices c06r3n07: /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/transformers/training_args.py:1741: FutureWarning: `--push_to_hub_token` is deprecated and will be removed in version 5 of 🤗 Transformers. Use `--hub_token` instead.
c06r3n07: warnings.warn( c06r3n08: Loading cached split indices for dataset at /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-9854d2224d063093.arrow and /work/home/liangjing/.cache/huggingface/datasets/json/default-c71a5e5c5041e81e/0.0.0/8bb11242116d547c741b2e8a1f18598ffdd40a1d4f2a2872c7a28b697434bc96/cache-e052a5760cca9436.arrow c06r3n06: Detected kernel version 3.10.0, which is below the recommended minimum of 5.5.0; this can cause the process to hang. It is recommended to upgrade the kernel to the minimum version or higher. c06r3n09: /work/home/liangjing/anaconda3/envs/torch2.1/lib/python3.8/site-packages/accelerate/accelerator.py:432: FutureWarning: Passing the following arguments to `Accelerator` is deprecated and will be removed in version 1.0 of Accelerate: dict_keys(['dispatch_batches', 'split_batches']).
Please pass an `accelerate.DataLoaderConfiguration` instead: c06r3n09: dataloader_config = DataLoaderConfiguration(dispatch_batches=None, split_batches=False) c06r3n09: warnings.warn( c06r3n09: [INFO|trainer.py:571] 2024-03-15 11:11:17,914 >> Using auto half precision backend c06r3n07: [INFO|trainer.py:571] 2024-03-15 11:11:17,919 >> Using auto half precision backend c06r3n06: [INFO|trainer.py:571] 2024-03-15 11:11:17,919 >> Using auto half precision backend c06r3n08: [INFO|trainer.py:571] 2024-03-15 11:11:17,918 >> Using auto half precision backend c06r3n06: [2024-03-15 11:11:22,334] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed info: version=0.12.3, git-hash=299681e, git-branch=main c06r3n07: I0315 11:11:22.353425 30392 ProcessGroupNCCL.cpp:686] [Rank 4] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=284493504 c06r3n07: I0315 11:11:22.354048 30395 ProcessGroupNCCL.cpp:686] [Rank 7] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=278088544 c06r3n07: I0315 11:11:22.354669 30394 ProcessGroupNCCL.cpp:686] [Rank 6] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=278394912 c06r3n07: I0315 11:11:22.359563 30393 ProcessGroupNCCL.cpp:686] [Rank 5] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=296712352 c06r3n09: I0315 11:11:22.358403 12646 ProcessGroupNCCL.cpp:686] [Rank 13] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=284665904 c06r3n09: I0315 11:11:22.358805 12648 ProcessGroupNCCL.cpp:686] [Rank 15] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=297337616 c06r3n09: I0315 11:11:22.358872 12647 ProcessGroupNCCL.cpp:686] [Rank 14] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=287324512 c06r3n09: I0315 11:11:22.358886 12645 ProcessGroupNCCL.cpp:686] [Rank 12] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=275042848 c06r3n06: I0315 11:11:22.361479 15478 ProcessGroupNCCL.cpp:686] [Rank 1] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, 
TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=285314896 c06r3n06: I0315 11:11:22.361505 15477 ProcessGroupNCCL.cpp:686] [Rank 0] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=290255600 c06r3n06: I0315 11:11:22.361519 15479 ProcessGroupNCCL.cpp:686] [Rank 2] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=300963040 c06r3n06: I0315 11:11:22.361651 15480 ProcessGroupNCCL.cpp:686] [Rank 3] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=288532512 c06r3n06: [2024-03-15 11:11:22,362] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Flops Profiler Enabled: False c06r3n06: [2024-03-15 11:11:22,364] [INFO] [logging.py:96:log_dist] [Rank 0] Using client Optimizer as basic optimizer c06r3n06: [2024-03-15 11:11:22,364] [INFO] [logging.py:96:log_dist] [Rank 0] Removing param_group that has no 'params' in the basic Optimizer c06r3n08: I0315 11:11:22.364763 10991 ProcessGroupNCCL.cpp:686] [Rank 9] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=299755440 c06r3n08: I0315 11:11:22.365325 10993 ProcessGroupNCCL.cpp:686] [Rank 11] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=293794144 c06r3n08: I0315 11:11:22.365751 10990 ProcessGroupNCCL.cpp:686] [Rank 8] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=290632576 c06r3n08: I0315 11:11:22.366009 10992 ProcessGroupNCCL.cpp:686] [Rank 10] ProcessGroupNCCL initialization options:NCCL_ASYNC_ERROR_HANDLING: 1, NCCL_DESYNC_DEBUG: 0, NCCL_ENABLE_TIMING: 0, NCCL_BLOCKING_WAIT: 0, TIMEOUT(ms): 1800000, USE_HIGH_PRIORITY_STREAM: 0, TORCH_DISTRIBUTED_DEBUG: OFF, NCCL_DEBUG: OFF, ID=286394016 c06r3n06: [2024-03-15 11:11:22,378] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Basic Optimizer = AdamW c06r3n06: [2024-03-15 11:11:22,378] [INFO] [utils.py:56:is_zero_supported_optimizer] Checking ZeRO support for optimizer=AdamW type= c06r3n06: [2024-03-15 11:11:22,378] [INFO] [logging.py:96:log_dist] [Rank 0] Creating fp16 ZeRO stage 3 optimizer, MiCS is enabled False, Hierarchical params gather False c06r3n06: [2024-03-15 11:11:22,379] [INFO] [logging.py:96:log_dist] [Rank 0] Creating torch.float16 ZeRO stage 3 optimizer c06r3n06: [2024-03-15 11:11:22,591] [INFO] [utils.py:802:see_memory_usage] Stage 3 initialize beginning c06r3n06: [2024-03-15 11:11:22,592] [INFO] [utils.py:803:see_memory_usage] MA 0.82 GB Max_MA 1.31 GB CA 1.0 GB Max_CA 2 GB c06r3n06: [2024-03-15 11:11:22,593] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: 
used = 14.17 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:22,595] [INFO] [stage3.py:127:__init__] Reduce bucket size 500000000 c06r3n06: [2024-03-15 11:11:22,595] [INFO] [stage3.py:128:__init__] Prefetch bucket size 50,000,000 c06r3n06: [2024-03-15 11:11:22,749] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [begin] c06r3n06: [2024-03-15 11:11:22,750] [INFO] [utils.py:803:see_memory_usage] MA 0.82 GB Max_MA 0.82 GB CA 1.0 GB Max_CA 1 GB c06r3n06: [2024-03-15 11:11:22,751] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.17 GB, percent = 11.5% c06r3n06: Parameter Offload: Total persistent parameters: 266240 in 65 params c06r3n06: [2024-03-15 11:11:22,967] [INFO] [utils.py:802:see_memory_usage] DeepSpeedZeRoOffload initialize [end] c06r3n06: [2024-03-15 11:11:22,968] [INFO] [utils.py:803:see_memory_usage] MA 0.82 GB Max_MA 0.82 GB CA 1.0 GB Max_CA 1 GB c06r3n06: [2024-03-15 11:11:22,968] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.17 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:23,127] [INFO] [utils.py:802:see_memory_usage] Before creating fp16 partitions c06r3n06: [2024-03-15 11:11:23,128] [INFO] [utils.py:803:see_memory_usage] MA 0.82 GB Max_MA 0.82 GB CA 1.0 GB Max_CA 1 GB c06r3n06: [2024-03-15 11:11:23,129] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.17 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:23,912] [INFO] [utils.py:802:see_memory_usage] After creating fp16 partitions: 2 c06r3n06: [2024-03-15 11:11:23,913] [INFO] [utils.py:803:see_memory_usage] MA 0.82 GB Max_MA 0.82 GB CA 0.82 GB Max_CA 1 GB c06r3n06: [2024-03-15 11:11:23,913] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.18 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:24,171] [INFO] [utils.py:802:see_memory_usage] Before creating fp32 partitions c06r3n06: [2024-03-15 11:11:24,172] [INFO] [utils.py:803:see_memory_usage] MA 0.82 GB Max_MA 0.82 GB CA 0.82 GB Max_CA 1 GB c06r3n06: [2024-03-15 11:11:24,172] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.18 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:24,403] [INFO] [utils.py:802:see_memory_usage] After creating fp32 partitions c06r3n06: [2024-03-15 11:11:24,404] [INFO] [utils.py:803:see_memory_usage] MA 2.39 GB Max_MA 3.17 GB CA 3.18 GB Max_CA 3 GB c06r3n06: [2024-03-15 11:11:24,405] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.18 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:24,623] [INFO] [utils.py:802:see_memory_usage] Before initializing optimizer states c06r3n06: [2024-03-15 11:11:24,624] [INFO] [utils.py:803:see_memory_usage] MA 2.39 GB Max_MA 2.39 GB CA 3.18 GB Max_CA 3 GB c06r3n06: [2024-03-15 11:11:24,624] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.18 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:24,937] [INFO] [utils.py:802:see_memory_usage] After initializing optimizer states c06r3n06: [2024-03-15 11:11:24,938] [INFO] [utils.py:803:see_memory_usage] MA 5.52 GB Max_MA 8.66 GB CA 9.46 GB Max_CA 9 GB c06r3n06: [2024-03-15 11:11:24,939] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.27 GB, percent = 11.5% c06r3n06: [2024-03-15 11:11:24,939] [INFO] [stage3.py:479:_setup_for_real_optimizer] optimizer state initialized c06r3n09: [INFO|trainer.py:1721] 2024-03-15 11:11:28,759 >> ***** Running training ***** c06r3n09: [INFO|trainer.py:1722] 2024-03-15 11:11:28,759 >> Num examples = 99,811 c06r3n09: [INFO|trainer.py:1723] 2024-03-15 11:11:28,759 >> Num 
Epochs = 4 c06r3n09: [INFO|trainer.py:1724] 2024-03-15 11:11:28,759 >> Instantaneous batch size per device = 1 c06r3n09: [INFO|trainer.py:1727] 2024-03-15 11:11:28,759 >> Total train batch size (w. parallel, distributed & accumulation) = 16 c06r3n09: [INFO|trainer.py:1728] 2024-03-15 11:11:28,759 >> Gradient Accumulation steps = 1 c06r3n09: [INFO|trainer.py:1729] 2024-03-15 11:11:28,759 >> Total optimization steps = 24,956 c06r3n07: [INFO|trainer.py:1721] 2024-03-15 11:11:28,760 >> ***** Running training ***** c06r3n07: [INFO|trainer.py:1722] 2024-03-15 11:11:28,760 >> Num examples = 99,811 c06r3n07: [INFO|trainer.py:1723] 2024-03-15 11:11:28,760 >> Num Epochs = 4 c06r3n07: [INFO|trainer.py:1724] 2024-03-15 11:11:28,760 >> Instantaneous batch size per device = 1 c06r3n07: [INFO|trainer.py:1727] 2024-03-15 11:11:28,760 >> Total train batch size (w. parallel, distributed & accumulation) = 16 c06r3n07: [INFO|trainer.py:1728] 2024-03-15 11:11:28,760 >> Gradient Accumulation steps = 1 c06r3n07: [INFO|trainer.py:1729] 2024-03-15 11:11:28,760 >> Total optimization steps = 24,956 c06r3n09: [INFO|trainer.py:1730] 2024-03-15 11:11:28,761 >> Number of trainable parameters = 6,738,415,616 c06r3n07: [INFO|trainer.py:1730] 2024-03-15 11:11:28,762 >> Number of trainable parameters = 6,738,415,616 c06r3n08: [INFO|trainer.py:1721] 2024-03-15 11:11:28,762 >> ***** Running training ***** c06r3n08: [INFO|trainer.py:1722] 2024-03-15 11:11:28,762 >> Num examples = 99,811 c06r3n08: [INFO|trainer.py:1723] 2024-03-15 11:11:28,762 >> Num Epochs = 4 c06r3n08: [INFO|trainer.py:1724] 2024-03-15 11:11:28,762 >> Instantaneous batch size per device = 1 c06r3n08: [INFO|trainer.py:1727] 2024-03-15 11:11:28,762 >> Total train batch size (w. parallel, distributed & accumulation) = 16 c06r3n08: [INFO|trainer.py:1728] 2024-03-15 11:11:28,762 >> Gradient Accumulation steps = 1 c06r3n08: [INFO|trainer.py:1729] 2024-03-15 11:11:28,762 >> Total optimization steps = 24,956 c06r3n08: [INFO|trainer.py:1730] 2024-03-15 11:11:28,764 >> Number of trainable parameters = 6,738,415,616 c06r3n06: [2024-03-15 11:11:28,980] [INFO] [utils.py:802:see_memory_usage] After initializing ZeRO optimizer c06r3n06: [2024-03-15 11:11:28,981] [INFO] [utils.py:803:see_memory_usage] MA 7.24 GB Max_MA 7.73 GB CA 10.14 GB Max_CA 10 GB c06r3n06: [2024-03-15 11:11:28,981] [INFO] [utils.py:810:see_memory_usage] CPU Virtual Memory: used = 14.3 GB, percent = 11.6% c06r3n06: [2024-03-15 11:11:28,982] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed Final Optimizer = AdamW c06r3n06: [2024-03-15 11:11:28,982] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed using client LR scheduler c06r3n06: [2024-03-15 11:11:28,982] [INFO] [logging.py:96:log_dist] [Rank 0] DeepSpeed LR Scheduler = None c06r3n06: [2024-03-15 11:11:28,982] [INFO] [logging.py:96:log_dist] [Rank 0] step=0, skipped=0, lr=[0.0, 0.0], mom=[(0.9, 0.999), (0.9, 0.999)] c06r3n06: [2024-03-15 11:11:28,983] [INFO] [config.py:974:print] DeepSpeedEngine configuration: c06r3n06: [2024-03-15 11:11:28,983] [INFO] [config.py:978:print] activation_checkpointing_config { c06r3n06: "partition_activations": false, c06r3n06: "contiguous_memory_optimization": false, c06r3n06: "cpu_checkpointing": false, c06r3n06: "number_checkpoints": null, c06r3n06: "synchronize_checkpoint_boundary": false, c06r3n06: "profile": false c06r3n06: } c06r3n06: [2024-03-15 11:11:28,983] [INFO] [config.py:978:print] aio_config ................... 
{'block_size': 1048576, 'queue_depth': 8, 'thread_count': 1, 'single_submit': False, 'overlap_events': True} c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] amp_enabled .................. False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] amp_params ................... False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] autotuning_config ............ { c06r3n06: "enabled": false, c06r3n06: "start_step": null, c06r3n06: "end_step": null, c06r3n06: "metric_path": null, c06r3n06: "arg_mappings": null, c06r3n06: "metric": "throughput", c06r3n06: "model_info": null, c06r3n06: "results_dir": "autotuning_results", c06r3n06: "exps_dir": "autotuning_exps", c06r3n06: "overwrite": true, c06r3n06: "fast": true, c06r3n06: "start_profile_step": 3, c06r3n06: "end_profile_step": 5, c06r3n06: "tuner_type": "gridsearch", c06r3n06: "tuner_early_stopping": 5, c06r3n06: "tuner_num_trials": 50, c06r3n06: "model_info_path": null, c06r3n06: "mp_size": 1, c06r3n06: "max_train_batch_size": null, c06r3n06: "min_train_batch_size": 1, c06r3n06: "max_train_micro_batch_size_per_gpu": 1.024000e+03, c06r3n06: "min_train_micro_batch_size_per_gpu": 1, c06r3n06: "num_tuning_micro_batch_sizes": 3 c06r3n06: } c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] bfloat16_enabled ............. False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] checkpoint_parallel_write_pipeline False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] checkpoint_tag_validation_enabled True c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] checkpoint_tag_validation_fail False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] comms_config ................. c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] communication_data_type ...... None c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] compression_config ........... {'weight_quantization': {'shared_parameters': {'enabled': False, 'quantizer_kernel': False, 'schedule_offset': 0, 'quantize_groups': 1, 'quantize_verbose': False, 'quantization_type': 'symmetric', 'quantize_weight_in_forward': False, 'rounding': 'nearest', 'fp16_mixed_quantize': False, 'quantize_change_ratio': 0.001}, 'different_groups': {}}, 'activation_quantization': {'shared_parameters': {'enabled': False, 'quantization_type': 'symmetric', 'range_calibration': 'dynamic', 'schedule_offset': 1000}, 'different_groups': {}}, 'sparse_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'row_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'head_pruning': {'shared_parameters': {'enabled': False, 'method': 'topk', 'schedule_offset': 1000}, 'different_groups': {}}, 'channel_pruning': {'shared_parameters': {'enabled': False, 'method': 'l1', 'schedule_offset': 1000}, 'different_groups': {}}, 'layer_reduction': {'enabled': False}} c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] curriculum_enabled_legacy .... False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] curriculum_params_legacy ..... False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] data_efficiency_config ....... 
{'enabled': False, 'seed': 1234, 'data_sampling': {'enabled': False, 'num_epochs': 1000, 'num_workers': 0, 'curriculum_learning': {'enabled': False}}, 'data_routing': {'enabled': False, 'random_ltd': {'enabled': False, 'layer_token_lr_schedule': {'enabled': False}}}} c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] data_efficiency_enabled ...... False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] dataloader_drop_last ......... False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] disable_allgather ............ False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] dump_state ................... False c06r3n06: [2024-03-15 11:11:28,984] [INFO] [config.py:978:print] dynamic_loss_scale_args ...... {'init_scale': 65536, 'scale_window': 1000, 'delayed_shift': 2, 'consecutive_hysteresis': False, 'min_scale': 1} c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_enabled ........... False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_gas_boundary_resolution 1 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_layer_name ........ bert.encoder.layer c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_layer_num ......... 0 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_max_iter .......... 100 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_stability ......... 1e-06 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_tol ............... 0.01 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] eigenvalue_verbose ........... False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] elasticity_enabled ........... False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] flops_profiler_config ........ { c06r3n06: "enabled": false, c06r3n06: "recompute_fwd_factor": 0.0, c06r3n06: "profile_step": 1, c06r3n06: "module_depth": -1, c06r3n06: "top_modules": 1, c06r3n06: "detailed": true, c06r3n06: "output_file": null c06r3n06: } c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] fp16_auto_cast ............... False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] fp16_enabled ................. True c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] fp16_master_weights_and_gradients False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] global_rank .................. 0 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] grad_accum_dtype ............. None c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] gradient_accumulation_steps .. 1 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] gradient_clipping ............ 0.0 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] gradient_predivide_factor .... 1.0 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] hybrid_engine ................ enabled=False max_out_tokens=512 inference_tp_size=1 release_inference_cache=False pin_parameters=True tp_gather_partition_size=8 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] initial_dynamic_scale ........ 65536 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] load_universal_checkpoint .... False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] loss_scale ................... 
0 c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] memory_breakdown ............. False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] mics_hierarchial_params_gather False c06r3n06: [2024-03-15 11:11:28,985] [INFO] [config.py:978:print] mics_shard_size .............. -1 c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] monitor_config ............... tensorboard=TensorBoardConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') wandb=WandbConfig(enabled=False, group=None, team=None, project='deepspeed') csv_monitor=CSVConfig(enabled=False, output_path='', job_name='DeepSpeedJobName') enabled=False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] nebula_config ................ { c06r3n06: "enabled": false, c06r3n06: "persistent_storage_path": null, c06r3n06: "persistent_time_interval": 100, c06r3n06: "num_of_version_in_retention": 2, c06r3n06: "enable_nebula_load": true, c06r3n06: "load_path": null c06r3n06: } c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] optimizer_legacy_fusion ...... False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] optimizer_name ............... None c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] optimizer_params ............. None c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] pipeline ..................... {'stages': 'auto', 'partition': 'best', 'seed_layers': False, 'activation_checkpoint_interval': 0, 'pipe_partitioned': True, 'grad_partitioned': True} c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] pld_enabled .................. False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] pld_params ................... False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] prescale_gradients ........... False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] scheduler_name ............... None c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] scheduler_params ............. None c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] seq_parallel_communication_data_type torch.float32 c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] sparse_attention ............. None c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] sparse_gradients_enabled ..... False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] steps_per_print .............. inf c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] train_batch_size ............. 16 c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] train_micro_batch_size_per_gpu 1 c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] use_node_local_storage ....... True c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] wall_clock_breakdown ......... False c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] weight_quantization_config ... None c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] world_size ................... 16 c06r3n06: [2024-03-15 11:11:28,986] [INFO] [config.py:978:print] zero_allow_untested_optimizer True c06r3n06: [2024-03-15 11:11:28,987] [INFO] [config.py:978:print] zero_config .................. 
stage=3 contiguous_gradients=True reduce_scatter=True reduce_bucket_size=500000000 allgather_partitions=True allgather_bucket_size=500000000 overlap_comm=False load_from_fp32_weights=True elastic_checkpoint=False offload_param=None offload_optimizer=None sub_group_size=1,000,000,000 cpu_offload_param=None cpu_offload_use_pin_memory=None cpu_offload=None prefetch_bucket_size=50,000,000 param_persistence_threshold=100,000 model_persistence_threshold=sys.maxsize max_live_parameters=1,000,000,000 max_reuse_distance=1,000,000,000 gather_16bit_weights_on_model_save=False stage3_gather_fp16_weights_on_model_save=False ignore_unused_parameters=True legacy_stage1=False round_robin_gradients=False zero_hpz_partition_size=1 zero_quantized_weights=False zero_quantized_nontrainable_weights=False zero_quantized_gradients=False mics_shard_size=-1 mics_hierarchical_params_gather=False memory_efficient_linear=True pipeline_loading_checkpoint=False override_module_apply=True c06r3n06: [2024-03-15 11:11:28,987] [INFO] [config.py:978:print] zero_enabled ................. True c06r3n06: [2024-03-15 11:11:28,987] [INFO] [config.py:978:print] zero_force_ds_cpu_optimizer .. True c06r3n06: [2024-03-15 11:11:28,987] [INFO] [config.py:978:print] zero_optimization_stage ...... 3 c06r3n06: [2024-03-15 11:11:28,987] [INFO] [config.py:964:print_user_config] json = { c06r3n06: "train_micro_batch_size_per_gpu": 1, c06r3n06: "train_batch_size": 16, c06r3n06: "zero_allow_untested_optimizer": true, c06r3n06: "fp16": { c06r3n06: "enabled": true, c06r3n06: "loss_scale": 0, c06r3n06: "initial_scale_power": 16, c06r3n06: "loss_scale_window": 1000, c06r3n06: "hysteresis": 2, c06r3n06: "min_loss_scale": 1 c06r3n06: }, c06r3n06: "zero_force_ds_cpu_optimizer": true, c06r3n06: "zero_optimization": { c06r3n06: "stage": 3, c06r3n06: "stage3_gather_16bit_weights_on_model_save": false, c06r3n06: "allgather_partitions": true, c06r3n06: "allgather_bucket_size": 5.000000e+08, c06r3n06: "overlap_comm": false, c06r3n06: "reduce_scatter": true, c06r3n06: "reduce_bucket_size": 5.000000e+08, c06r3n06: "contiguous_gradients": true c06r3n06: }, c06r3n06: "checkpoint": { c06r3n06: "use_node_local_storage": true c06r3n06: }, c06r3n06: "gradient_accumulation_steps": 1, c06r3n06: "steps_per_print": inf, c06r3n06: "bf16": { c06r3n06: "enabled": false c06r3n06: } c06r3n06: } c06r3n06: [INFO|trainer.py:1721] 2024-03-15 11:11:28,987 >> ***** Running training ***** c06r3n06: [INFO|trainer.py:1722] 2024-03-15 11:11:28,987 >> Num examples = 99,811 c06r3n06: [INFO|trainer.py:1723] 2024-03-15 11:11:28,987 >> Num Epochs = 4 c06r3n06: [INFO|trainer.py:1724] 2024-03-15 11:11:28,987 >> Instantaneous batch size per device = 1 c06r3n06: [INFO|trainer.py:1727] 2024-03-15 11:11:28,987 >> Total train batch size (w. parallel, distributed & accumulation) = 16 c06r3n06: [INFO|trainer.py:1728] 2024-03-15 11:11:28,987 >> Gradient Accumulation steps = 1 c06r3n06: [INFO|trainer.py:1729] 2024-03-15 11:11:28,987 >> Total optimization steps = 24,956 c06r3n06: [INFO|trainer.py:1730] 2024-03-15 11:11:28,989 >> Number of trainable parameters = 6,738,415,616 c06r3n06: 0%| | 0/24956 [00:00
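The step count in the progress bar follows from the figures the trainer logs just above: 99,811 examples, a total train batch size of 16 (4 nodes x 4 GPUs x micro-batch 1 x 1 accumulation step) and 4 epochs give 6,239 update steps per epoch and 24,956 in total. A quick check of that arithmetic:

import math

num_examples = 99_811
per_device_batch = 1
grad_accum = 1
world_size = 16            # 4 nodes x 4 GPUs each
num_epochs = 4

total_batch = per_device_batch * grad_accum * world_size   # 16
steps_per_epoch = math.ceil(num_examples / total_batch)    # 6239
total_steps = steps_per_epoch * num_epochs                  # 24956
print(total_batch, steps_per_epoch, total_steps)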
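The see_memory_usage lines above also line up with a rough ZeRO stage 3 estimate: with 6,738,415,616 parameters sharded across 16 GPUs, the fp16 weight shard, the fp32 master copy, and the two fp32 Adam state tensors account for most of the allocated-memory jumps (0.82 GB -> 2.39 GB -> 5.52 GB). The sketch below is only a back-of-the-envelope check under those assumptions; the logged figures additionally include the small set of persistent, unpartitioned parameters and allocator overhead.

params = 6_738_415_616
world_size = 16
GiB = 1024 ** 3

fp16_shard  = params * 2 / world_size / GiB       # ~0.78 GiB  (logged MA 0.82 GB)
fp32_master = params * 4 / world_size / GiB       # ~1.57 GiB  (MA rises to 2.39 GB)
adam_states = 2 * params * 4 / world_size / GiB   # ~3.14 GiB  (MA rises to 5.52 GB)

print(round(fp16_shard, 2), round(fp32_master, 2), round(adam_states, 2))
print(round(fp16_shard + fp32_master + adam_states, 2))  # ~5.49 GiB per GPU before activations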