"examples/movement-pruning/masked_run_glue.py" did not exist on "31c23bd5ee26425a67f92fc170789656379252a6"
Commit 1bfbcff0 authored by wanglch's avatar wanglch
Browse files

Initial commit

parents
Pipeline #1204 canceled with stages
# Experimental environment: V100, A10, 3090
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/internlm-xcomposer2-7b-chat/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--use_flash_attn false \
--max_new_tokens 2048 \
--temperature 0.5 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
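
# "vx-xxx/checkpoint-xxx" is a placeholder: point --ckpt_dir at the checkpoint
# directory that the sft run below actually produced. A sketch for picking the
# most recent one, assuming the default output layout:
ls -dt output/internlm-xcomposer2-7b-chat/v*/checkpoint-* | head -n 1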
# Experimental environment: V100, A10, 3090
# 21GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_type internlm-xcomposer2-7b-chat \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--dataset coco-en-mini \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules DEFAULT \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--use_flash_attn false
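
# With batch_size 1 and gradient_accumulation_steps 16, the effective global
# batch size is 1 * 16 = 16 samples per optimizer step.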
# Experimental environment: A100
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/llama2-13b-chat/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
# Experimental environment: 2 * A100
# 2 * 37GB GPU memory
nproc_per_node=2
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
torchrun \
--nproc_per_node=$nproc_per_node \
--master_port 29500 \
llm_sft.py \
--model_id_or_path modelscope/Llama-2-13b-chat-ms \
--model_revision master \
--sft_type longlora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--ddp_backend nccl \
--dataset leetcode-python-en \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 4096 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--deepspeed default-zero2 \
--save_only_model true
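
# gradient_accumulation_steps is derived from the GPU count so the global
# batch size stays constant across launch configurations:
# batch_size * (16 / nproc_per_node) * nproc_per_node = 1 * 8 * 2 = 16.
# --save_only_model true skips optimizer/scheduler state in checkpoints, which
# keeps the ZeRO-2 checkpoints small at the cost of not being resumable.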
# Experimental environment: A10
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/llama2-13b-chat/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
# Experimental environment: 2 * A10
# 2 * 16GB GPU memory
nproc_per_node=2
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
torchrun \
--nproc_per_node=$nproc_per_node \
--master_port 29500 \
llm_sft.py \
--model_id_or_path modelscope/Llama-2-13b-chat-ms \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--ddp_backend nccl \
--dataset leetcode-python-en \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 4096 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--deepspeed default-zero2
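
# --quantization_bit 4 loads the base model in 4-bit via bitsandbytes; install
# it first if it is missing:
pip install bitsandbytes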
# Experimental environment: A10
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python rome_infer.py \
--model_id_or_path modelscope/Llama-2-13b-chat-ms \
--model_revision master \
--template_type AUTO \
--dtype AUTO \
--max_new_tokens 128 \
--temperature 0.1 \
--top_p 0.7 \
--do_sample true \
--rome_request_file rome_example/request.json
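
# rome_example/request.json holds the knowledge-edit requests. A hypothetical
# example, assuming the ROME-style prompt/subject/target schema (check the
# repo's sample file for the exact fields):
cat > rome_example/request.json <<'EOF'
[
  {
    "prompt": "{} was the founder of",
    "subject": "Steve Jobs",
    "target": "Microsoft"
  }
]
EOF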
# Experimental environment: A100
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/llama2-70b-chat/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
# Experimental environment: 2 * A100
# 2 * 50GB GPU memory
nproc_per_node=2
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
torchrun \
--nproc_per_node=$nproc_per_node \
--master_port 29500 \
llm_sft.py \
--model_id_or_path modelscope/Llama-2-70b-chat-ms \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--ddp_backend nccl \
--dataset leetcode-python-en \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 4096 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules q_proj v_proj \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--deepspeed default-zero2
{
  "compute_environment": "LOCAL_MACHINE",
  "debug": false,
  "distributed_type": "FSDP",
  "downcast_bf16": "no",
  "fsdp_config": {
    "fsdp_auto_wrap_policy": "TRANSFORMER_BASED_WRAP",
    "fsdp_backward_prefetch": "BACKWARD_PRE",
    "fsdp_cpu_ram_efficient_loading": true,
    "fsdp_forward_prefetch": false,
    "fsdp_offload_params": true,
    "fsdp_sharding_strategy": "FULL_SHARD",
    "fsdp_state_dict_type": "FULL_STATE_DICT",
    "fsdp_sync_module_states": true,
    "fsdp_use_orig_params": false
  },
  "machine_rank": 0,
  "main_training_function": "main",
  "mixed_precision": "no",
  "num_machines": 1,
  "num_processes": 2,
  "rdzv_backend": "static",
  "same_network": true,
  "tpu_env": [],
  "tpu_use_cluster": false,
  "tpu_use_sudo": false,
  "use_cpu": false
}
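
# The JSON above is the accelerate FSDP configuration consumed via
# --config_file in the qlora_fsdp launch script below; an equivalent file can
# also be generated interactively (assuming a recent accelerate release):
accelerate config --config_file fsdp_offload.json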
# 2 GPUs, 80GB GPU memory in total
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_infer.py \
--ckpt_dir "output/llama2-70b-chat/vxx-xxx-xxxx/checkpoint-xx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
# 2 * 24GB GPU memory
# Requires bitsandbytes>=0.43.0
nproc_per_node=2
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
accelerate launch --config_file "./scripts/llama2_70b_chat/qlora_fsdp/fsdp_offload.json" \
llm_sft.py \
--model_type llama2-70b-chat \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype bf16 \
--output_dir output \
--dataset leetcode-python-en \
--train_dataset_sample -1 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--bnb_4bit_quant_storage bfloat16 \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dtype AUTO \
--lora_dropout_p 0.05 \
--lora_target_modules DEFAULT \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 50 \
--save_steps 50 \
--save_total_limit 2 \
--logging_steps 10
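
# Why this works: --bnb_4bit_quant_storage bfloat16 stores the packed 4-bit
# weights in bf16 tensors, so FSDP can shard them like ordinary parameters;
# that is also why bitsandbytes>=0.43.0 is required and --dtype is pinned to
# bf16 here.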
# Experimental environment: 2 * 3090
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
python llm_infer.py \
--ckpt_dir "output/llama2-70b-chat/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
# Experimental environment: 2 * 3090
# 2 * 23GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
python llm_sft.py \
--model_id_or_path modelscope/Llama-2-70b-chat-ms \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--dataset sql-create-context-en \
--train_dataset_sample 20000 \
--num_train_epochs 1 \
--max_length 2048 \
--check_dataset_strategy warning \
--quantization_bit 4 \
--bnb_4bit_comp_dtype AUTO \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules q_proj v_proj \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10
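
# Note: this run is launched with plain `python` while two GPUs are visible,
# so swift typically splits the 70B model across both cards (device-map model
# parallelism) rather than the data-parallel torchrun launches used above.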
# Experimental environment: A10, RTX3090/4090, A100
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0,1 \
python llm_infer.py \
--ckpt_dir "output/llama2-7b-aqlm-2bit-1x16/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--use_flash_attn true \
--max_new_tokens 2048 \
--temperature 0.5 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--stream false \
--merge_lora false
# Experimental environment: A10, RTX3090/4090, A100
# 1 * 7.5GB GPU memory
PYTHONPATH=../../.. \
CUDA_VISIBLE_DEVICES=0 \
python llm_sft.py \
--model_type llama2-7b-aqlm-2bit-1x16 \
--dataset dureader-robust-zh \
--batch_size 4 \
--max_length 1024 \
--gradient_accumulation_steps 2 \
--learning_rate 5e-5 \
--use_flash_attn true \
--eval_steps 1000 \
--save_steps 1000 \
--train_dataset_sample -1 \
--num_train_epochs 2 \
--check_dataset_strategy none \
--gradient_checkpointing true \
--weight_decay 0.1 \
--max_grad_norm 1.0 \
--warmup_ratio 0.03 \
--save_total_limit 2 \
--logging_steps 10 \
--sft_type lora \
--lora_target_modules ALL \
--lora_rank 8 \
--lora_alpha 32
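
# The llama2-7b-aqlm-2bit-1x16 checkpoint is AQLM-quantized, so the aqlm
# package must be installed first (assuming the extras name used by the
# upstream docs):
pip install "aqlm[gpu]"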
# Experimental environment: 3090, A10, V100
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/llama3-8b-instruct/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
# Experimental environment: 3090, A10, V100
# 20GB GPU memory
CUDA_VISIBLE_DEVICES=0 \
swift sft \
--model_id_or_path LLM-Research/Meta-Llama-3-8B-Instruct \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--dataset blossom-math-zh \
--train_dataset_sample -1 \
--num_train_epochs 5 \
--max_length 2048 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps 16 \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10
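
# Training curves are written under the run's output directory; assuming the
# default TensorBoard logging, they can be inspected with:
tensorboard --logdir output/llama3-8b-instruct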
# Experimental environment: 3090, A10, V100
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/llama3-8b-instruct/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--max_new_tokens 2048 \
--temperature 0.1 \
--top_p 0.7 \
--repetition_penalty 1. \
--do_sample true \
--merge_lora false
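
# To fold the LoRA weights into the base model before inference, the same
# command can be run with the flag flipped; a merged copy of the weights is
# typically saved alongside the checkpoint:
CUDA_VISIBLE_DEVICES=0 \
swift infer \
--ckpt_dir "output/llama3-8b-instruct/vx-xxx/checkpoint-xxx" \
--load_dataset_config true \
--merge_lora true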
# Experimental environment: 2 * 3090
# 2 * 22GB GPU memory
nproc_per_node=2
NPROC_PER_NODE=$nproc_per_node \
MASTER_PORT=29500 \
CUDA_VISIBLE_DEVICES=0,1 \
swift sft \
--model_id_or_path LLM-Research/Meta-Llama-3-8B-Instruct \
--model_revision master \
--sft_type lora \
--tuner_backend peft \
--template_type AUTO \
--dtype AUTO \
--output_dir output \
--ddp_backend nccl \
--dataset blossom-math-zh \
--train_dataset_sample -1 \
--num_train_epochs 5 \
--max_length 2048 \
--check_dataset_strategy warning \
--lora_rank 8 \
--lora_alpha 32 \
--lora_dropout_p 0.05 \
--lora_target_modules ALL \
--gradient_checkpointing true \
--batch_size 1 \
--weight_decay 0.1 \
--learning_rate 1e-4 \
--gradient_accumulation_steps $(expr 16 / $nproc_per_node) \
--max_grad_norm 0.5 \
--warmup_ratio 0.03 \
--eval_steps 100 \
--save_steps 100 \
--save_total_limit 2 \
--logging_steps 10 \
--deepspeed default-zero2
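
# Setting NPROC_PER_NODE/MASTER_PORT in the environment makes `swift sft`
# spawn the distributed workers itself, so this launch is equivalent to the
# explicit `torchrun --nproc_per_node=2` invocations in the llama2 scripts
# above.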