# Time-LLM/scripts/TimeLLM_ETTm2.sh

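# Time-LLM long-term forecasting on ETTm2: four runs sharing a 512-step input
# window with prediction horizons of 96, 192, 336, and 720, each launched via
# accelerate on $num_process GPUs with bf16 mixed precision.
# Typically invoked from the repository root, e.g.: bash ./scripts/TimeLLM_ETTm2.sh
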
model_name=TimeLLM
train_epochs=10
learning_rate=0.01       # base learning rate; the 192/336/720 runs override it to 0.002
llama_layers=32          # number of LLM backbone layers, passed as --llm_layers
master_port=00097        # main process port for accelerate / torch distributed
num_process=8            # one process per GPU
batch_size=24            # per-process batch size (the 96-horizon run hard-codes 16)
d_model=32
d_ff=128
comment='TimeLLM-ETTm2'
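
# Horizon 96: seq_len 512 -> pred_len 96 (batch size 16, lr 0.01, default lr schedule)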
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_main.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path ETTm2.csv \
--model_id ETTm2_512_96 \
--model $model_name \
--data ETTm2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 96 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size 16 \
--learning_rate $learning_rate \
--llm_layers $llama_layers \
--train_epochs $train_epochs \
--model_comment $comment
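
# Horizon 192: seq_len 512 -> pred_len 192 (lr 0.002 with the 'TST' schedule)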
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_main.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path ETTm2.csv \
--model_id ETTm2_512_192 \
--model $model_name \
--data ETTm2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 192 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size $batch_size \
--learning_rate 0.002 \
--lradj 'TST' \
--llm_layers $llama_layers \
--train_epochs $train_epochs \
--model_comment $comment
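
# Horizon 336: seq_len 512 -> pred_len 336 (lr 0.002 with the 'TST' schedule)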
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_main.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path ETTm2.csv \
--model_id ETTm2_512_336 \
--model $model_name \
--data ETTm2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 336 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size $batch_size \
--learning_rate 0.002 \
--lradj 'TST' \
--llm_layers $llama_layers \
--train_epochs $train_epochs \
--model_comment $comment
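
# Horizon 720: seq_len 512 -> pred_len 720 (lr 0.002 with the 'TST' schedule)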
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_main.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path ETTm2.csv \
--model_id ETTm2_512_720 \
--model $model_name \
--data ETTm2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 720 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size $batch_size \
--learning_rate 0.002 \
--lradj 'TST' \
--llm_layers $llama_layers \
--train_epochs $train_epochs \
--model_comment $comment
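
# Optional sketch (not part of the original script): for quick single-GPU debugging,
# the same run_main.py flags can be launched without --multi_gpu, e.g.:
#   accelerate launch --num_processes 1 --mixed_precision bf16 run_main.py \
#     --task_name long_term_forecast --is_training 1 ... (remaining flags as above)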