Add additional demo scripts

This commit is contained in:
Ming Jin 2024-02-26 15:22:13 +11:00
parent 489f32675f
commit 80769b5245
3 changed files with 332 additions and 0 deletions

108
scripts/TimeLLM_ECL.sh Normal file
View File

@@ -0,0 +1,108 @@
# Shared hyper-parameters for the TimeLLM ECL (electricity) demo runs below.
model_name="TimeLLM"       # forecasting model selected via run.py --model
train_epochs=10            # training epochs per experiment
learning_rate=0.01
llama_layers=32            # number of LLaMA layers used as the LLM backbone
master_port=00097          # main process port for accelerate
num_process=8              # number of accelerate worker processes (GPUs)
batch_size=24
d_model=16                 # NOTE(review): defined but never passed to run.py below — confirm
d_ff=32                    # NOTE(review): defined but never passed to run.py below — confirm
comment='TimeLLM-ECL'      # tag forwarded via --model_comment
# Launch long-term-forecast training on the Electricity (ECL) dataset for the
# four standard horizons. All runs share the same hyper-parameters; only the
# prediction length (and the derived --model_id) varies, so the four original
# copy-pasted invocations are folded into one loop with identical behavior.
for pred_len in 96 192 336 720; do
  accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run.py \
    --task_name long_term_forecast \
    --is_training 1 \
    --root_path ./dataset/electricity/ \
    --data_path electricity.csv \
    --model_id ECL_512_$pred_len \
    --model $model_name \
    --data custom \
    --features M \
    --seq_len 512 \
    --label_len 48 \
    --pred_len $pred_len \
    --e_layers 2 \
    --d_layers 1 \
    --factor 3 \
    --enc_in 321 \
    --dec_in 321 \
    --c_out 321 \
    --batch_size $batch_size \
    --learning_rate $learning_rate \
    --llm_layers $llama_layers \
    --train_epochs $train_epochs \
    --model_comment $comment
done

108
scripts/TimeLLM_Traffic.sh Normal file
View File

@@ -0,0 +1,108 @@
# Shared hyper-parameters for the TimeLLM Traffic demo runs below.
model_name="TimeLLM"       # forecasting model selected via run.py --model
train_epochs=10            # training epochs per experiment
learning_rate=0.01
llama_layers=32            # number of LLaMA layers used as the LLM backbone
master_port=00097          # main process port for accelerate
num_process=8              # number of accelerate worker processes (GPUs)
batch_size=24
d_model=16                 # NOTE(review): defined but never passed to run.py below — confirm
d_ff=32                    # NOTE(review): defined but never passed to run.py below — confirm
comment='TimeLLM-Traffic'  # tag forwarded via --model_comment
# Launch long-term-forecast training on the Traffic dataset for the four
# standard horizons (96/192/336/720), deduplicated from four copy-pasted
# invocations into a single loop.
#
# Fixes vs. the original commands:
#   * --model_id was "traffic_512_96" for all four runs, so logs/checkpoints
#     of different horizons would collide; it now tracks the horizon
#     (traffic_512_96 ... traffic_512_720).
#   * the last run passed --label_len 720 --pred_len 96 (arguments swapped);
#     every sibling script keeps label_len at 48 and cycles pred_len through
#     96/192/336/720, so that pattern is restored here.
for pred_len in 96 192 336 720; do
  # NOTE(review): the original 336-horizon run used --batch_size 1 while all
  # others used $batch_size — possibly an OOM workaround; preserved as-is, confirm.
  run_batch_size=$batch_size
  if [ "$pred_len" -eq 336 ]; then
    run_batch_size=1
  fi
  accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run.py \
    --task_name long_term_forecast \
    --is_training 1 \
    --root_path ./dataset/traffic/ \
    --data_path traffic.csv \
    --model_id traffic_512_$pred_len \
    --model $model_name \
    --data custom \
    --features M \
    --seq_len 512 \
    --label_len 48 \
    --pred_len $pred_len \
    --e_layers 2 \
    --d_layers 1 \
    --factor 3 \
    --enc_in 862 \
    --dec_in 862 \
    --c_out 862 \
    --batch_size $run_batch_size \
    --learning_rate $learning_rate \
    --llm_layers $llama_layers \
    --train_epochs $train_epochs \
    --model_comment $comment
done

116
scripts/TimeLLM_Weather.sh Normal file
View File

@@ -0,0 +1,116 @@
# Shared hyper-parameters for the TimeLLM Weather demo runs below.
model_name="TimeLLM"       # forecasting model selected via run.py --model
train_epochs=10            # training epochs per experiment
learning_rate=0.01
llama_layers=32            # number of LLaMA layers used as the LLM backbone
master_port=00097          # main process port for accelerate
num_process=8              # number of accelerate worker processes (GPUs)
batch_size=24
d_model=16                 # NOTE(review): the launch commands below hard-code --d_model 32, not this value — confirm
d_ff=32                    # NOTE(review): the launch commands below hard-code --d_ff 32/128, not this value — confirm
comment='TimeLLM-Weather'  # tag forwarded via --model_comment
# Launch long-term-forecast training on the Weather dataset for the four
# standard horizons, deduplicated from four copy-pasted invocations.
#
# Per-horizon differences are kept exactly as in the original commands: the
# two longest horizons use a wider FFN (--d_ff 128) and the 720 run trains
# for 15 epochs. The original 336 run hard-coded --train_epochs 10, which is
# identical to $train_epochs and is now expressed through the variable.
# NOTE(review): --d_model is hard-coded to 32 even though d_model=16 is set
# at the top of this script — confirm which value is intended.
for pred_len in 96 192 336 720; do
  case $pred_len in
    96 | 192) run_d_ff=32;  run_epochs=$train_epochs ;;
    336)      run_d_ff=128; run_epochs=$train_epochs ;;
    720)      run_d_ff=128; run_epochs=15 ;;
  esac
  accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run.py \
    --task_name long_term_forecast \
    --is_training 1 \
    --root_path ./dataset/weather/ \
    --data_path weather.csv \
    --model_id weather_512_$pred_len \
    --model $model_name \
    --data custom \
    --features M \
    --seq_len 512 \
    --label_len 48 \
    --pred_len $pred_len \
    --e_layers 2 \
    --d_layers 1 \
    --factor 3 \
    --enc_in 21 \
    --dec_in 21 \
    --c_out 21 \
    --d_model 32 \
    --d_ff $run_d_ff \
    --batch_size $batch_size \
    --learning_rate $learning_rate \
    --llm_layers $llama_layers \
    --train_epochs $run_epochs \
    --model_comment $comment
done