# Runtime environment: pin the job to GPU 7, quiet TensorFlow logging,
# and keep the XLA client from preallocating GPU memory.
export CUDA_VISIBLE_DEVICES=7
export USE_MIOPEN_BATCHNORM=1
export TF_CPP_MIN_LOG_LEVEL=2
export XLA_PYTHON_CLIENT_PREALLOCATE=false

# Extended benchmarks with the pretrained TimesFM checkpoint on GPU.
python3 -m experiments.extended_benchmarks.run_timesfm \
  --model_path="model/checkpoints" \
  --backend="gpu"

echo "finish run_timesfm!!!!"

# Long-horizon evaluation over every dataset / prediction-length combination.
for dataset in etth1 ettm1
do
  for pred_len in 96 192 336
  do
    python3 -m experiments.long_horizon_benchmarks.run_eval \
      --model_path="model/checkpoints" \
      --backend="gpu" \
      --pred_len=$pred_len \
      --context_len=512 \
      --dataset=$dataset
  done
done

echo "finish run_eval!!!!"