Support zero-shot forecasting functionality

This commit is contained in:
Ming Jin 2024-02-09 01:16:10 +11:00
parent 39d7d77c02
commit 37dacef293
6 changed files with 662 additions and 1 deletion

data_provider_pretrain/data_factory.py
@@ -0,0 +1,48 @@
from torch.utils.data import DataLoader

from data_provider_pretrain.data_loader import Dataset_ETT_hour, Dataset_ETT_minute

# Map dataset names to loader classes; zero-shot pretraining currently covers the four ETT benchmarks.
data_dict = {
    'ETTh1': Dataset_ETT_hour,
    'ETTh2': Dataset_ETT_hour,
    'ETTm1': Dataset_ETT_minute,
    'ETTm2': Dataset_ETT_minute,
}


def data_provider(args, data, data_path, pretrain=True, flag='train'):
    Data = data_dict[data]
    timeenc = 0 if args.embed != 'timeF' else 1
    percent = args.percent

    # The test split is served in order; train and val batches are shuffled.
    # All splits keep drop_last=True and use args.batch_size.
    if flag == 'test':
        shuffle_flag = False
        drop_last = True
        batch_size = args.batch_size
        freq = args.freq
    else:
        shuffle_flag = True
        drop_last = True
        batch_size = args.batch_size
        freq = args.freq

    data_set = Data(
        root_path=args.root_path,
        data_path=data_path,
        flag=flag,
        size=[args.seq_len, args.label_len, args.pred_len],
        features=args.features,
        target=args.target,
        timeenc=timeenc,
        freq=freq,
        percent=percent,
        seasonal_patterns=args.seasonal_patterns,
        pretrain=pretrain
    )
    data_loader = DataLoader(
        data_set,
        batch_size=batch_size,
        shuffle=shuffle_flag,
        num_workers=args.num_workers,
        drop_last=drop_last)
    return data_set, data_loader
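For reference, a minimal sketch of how this provider is driven for zero-shot transfer. The SimpleNamespace args object is illustrative only (the real fields come from run_pretrain.py's argparse below) and it assumes the ETT CSVs exist under root_path:

from types import SimpleNamespace
from data_provider_pretrain.data_factory import data_provider

args = SimpleNamespace(embed='timeF', percent=100, batch_size=32, freq='h', num_workers=0,
                       root_path='./dataset/ETT-small/', seq_len=512, label_len=48, pred_len=96,
                       features='M', target='OT', seasonal_patterns='Monthly')

# Source dataset, seen during training (pretrain=True widens its training split) ...
train_set, train_loader = data_provider(args, 'ETTh1', 'ETTh1.csv', pretrain=True, flag='train')
# ... and the unseen target dataset, used only for zero-shot evaluation.
test_set, test_loader = data_provider(args, 'ETTh2', 'ETTh2.csv', pretrain=False, flag='test')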

data_provider_pretrain/data_loader.py
@@ -0,0 +1,221 @@
import os
import pandas as pd
from torch.utils.data import Dataset
from sklearn.preprocessing import StandardScaler
from utils.timefeatures import time_features
import warnings
warnings.filterwarnings('ignore')
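# The two loaders below follow the standard ETT split logic; the new pretrain flag widens the
# training window of the source dataset for cross-dataset (zero-shot) pretraining.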
class Dataset_ETT_hour(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='ETTh1.csv',
                 target='OT', scale=True, timeenc=0, freq='h', percent=100,
                 seasonal_patterns=None, pretrain=True):
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.percent = percent
        self.pretrain = pretrain
        self.features = features
        self.target = target
        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

        self.enc_in = self.data_x.shape[-1]
        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))

        # Split borders in hours (30-day months): in pretraining mode the source dataset's standard
        # train + val months (0-16) are all used for training and the last 4 months double as both
        # validation and test; otherwise the usual 12/4/4-month ETT split applies.
        if self.pretrain:
            border1s = [0, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]
            border2s = [12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]
        else:
            border1s = [0, 12 * 30 * 24 - self.seq_len, 12 * 30 * 24 + 4 * 30 * 24 - self.seq_len]
            border2s = [12 * 30 * 24, 12 * 30 * 24 + 4 * 30 * 24, 12 * 30 * 24 + 8 * 30 * 24]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        # Optionally train on only the first `percent` of the training window.
        if self.set_type == 0:
            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday())
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour)
            data_stamp = df_stamp.drop(['date'], axis=1).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)

        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        feat_id = index // self.tot_len
        s_begin = index % self.tot_len

        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len
        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]
        seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
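# Dataset_ETT_minute below mirrors Dataset_ETT_hour at 15-minute resolution: split borders scale
# by 4 samples per hour and a quarter-hour 'minute' feature is added to the time stamps.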
class Dataset_ETT_minute(Dataset):
    def __init__(self, root_path, flag='train', size=None,
                 features='S', data_path='ETTm1.csv',
                 target='OT', scale=True, timeenc=0, freq='t', percent=100,
                 seasonal_patterns=None, pretrain=True):
        if size is None:
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ['train', 'test', 'val']
        type_map = {'train': 0, 'val': 1, 'test': 2}
        self.set_type = type_map[flag]

        self.percent = percent
        self.pretrain = pretrain
        self.features = features
        self.target = target
        self.scale = scale
        self.timeenc = timeenc
        self.freq = freq

        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()

        self.enc_in = self.data_x.shape[-1]
        self.tot_len = len(self.data_x) - self.seq_len - self.pred_len + 1

    def __read_data__(self):
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path,
                                          self.data_path))

        # Same split logic as Dataset_ETT_hour, but in 15-minute steps (24 * 4 samples per day).
        if self.pretrain:
            border1s = [0, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len,
                        12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]
            border2s = [12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4,
                        12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]
        else:
            border1s = [0, 12 * 30 * 24 * 4 - self.seq_len, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len]
            border2s = [12 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 4 * 30 * 24 * 4, 12 * 30 * 24 * 4 + 8 * 30 * 24 * 4]
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]

        if self.set_type == 0:
            border2 = (border2 - self.seq_len) * self.percent // 100 + self.seq_len

        if self.features == 'M' or self.features == 'MS':
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == 'S':
            df_data = df_raw[[self.target]]

        if self.scale:
            train_data = df_data[border1s[0]:border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values

        df_stamp = df_raw[['date']][border1:border2]
        df_stamp['date'] = pd.to_datetime(df_stamp.date)
        if self.timeenc == 0:
            df_stamp['month'] = df_stamp.date.apply(lambda row: row.month)
            df_stamp['day'] = df_stamp.date.apply(lambda row: row.day)
            df_stamp['weekday'] = df_stamp.date.apply(lambda row: row.weekday())
            df_stamp['hour'] = df_stamp.date.apply(lambda row: row.hour)
            df_stamp['minute'] = df_stamp.date.apply(lambda row: row.minute)
            df_stamp['minute'] = df_stamp.minute.map(lambda x: x // 15)
            data_stamp = df_stamp.drop(['date'], axis=1).values
        elif self.timeenc == 1:
            data_stamp = time_features(pd.to_datetime(df_stamp['date'].values), freq=self.freq)
            data_stamp = data_stamp.transpose(1, 0)

        self.data_x = data[border1:border2]
        self.data_y = data[border1:border2]
        self.data_stamp = data_stamp

    def __getitem__(self, index):
        feat_id = index // self.tot_len
        s_begin = index % self.tot_len

        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len
        seq_x = self.data_x[s_begin:s_end, feat_id:feat_id + 1]
        seq_y = self.data_y[r_begin:r_end, feat_id:feat_id + 1]
        seq_x_mark = self.data_stamp[s_begin:s_end]
        seq_y_mark = self.data_stamp[r_begin:r_end]

        return seq_x, seq_y, seq_x_mark, seq_y_mark

    def __len__(self):
        return (len(self.data_x) - self.seq_len - self.pred_len + 1) * self.enc_in

    def inverse_transform(self, data):
        return self.scaler.inverse_transform(data)
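A note on indexing, since both classes flatten channels into the sample index; the numbers below are illustrative only:

# With enc_in = 7 channels and tot_len = 8000 sliding windows per channel,
# __len__() returns 7 * 8000 = 56000 samples.
# Index 8001 maps to feat_id = 8001 // 8000 = 1 (second variate)
#             and s_begin = 8001 %  8000 = 1 (second window),
# so each univariate series contributes tot_len consecutive samples.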

@@ -264,4 +264,4 @@ accelerator.wait_for_everyone()
if accelerator.is_local_main_process:
    path = './checkpoints'  # unique checkpoint saving path
    del_files(path)  # delete checkpoint files
    accelerator.print('success delete checkpoints')

run_pretrain.py
@@ -0,0 +1,268 @@
import argparse
import torch
from accelerate import Accelerator, DeepSpeedPlugin
from accelerate import DistributedDataParallelKwargs
from torch import nn, optim
from torch.optim import lr_scheduler
from data_provider_pretrain.data_factory import data_provider
from models import Autoformer, DLinear, TimeLLM
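# Any --model value other than 'Autoformer' or 'DLinear' falls back to the TimeLLM backbone inside the run loop.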
import time
import random
import numpy as np
import os
os.environ['CURL_CA_BUNDLE'] = ''
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:64"
from utils.tools import del_files, EarlyStopping, adjust_learning_rate, vali, load_content
parser = argparse.ArgumentParser(description='Time-LLM')
fix_seed = 2021
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
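# Fix seeds for Python's random, PyTorch, and NumPy so runs are reproducible.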
# basic config
parser.add_argument('--task_name', type=str, required=True, default='long_term_forecast',
help='task name, options:[long_term_forecast, short_term_forecast, imputation, classification, anomaly_detection]')
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model_comment', type=str, required=True, default='none', help='prefix when saving test results')
parser.add_argument('--model', type=str, required=True, default='Autoformer',
help='model name, options: [Autoformer, DLinear]')
parser.add_argument('--seed', type=int, default=2021, help='random seed')
# data loader
parser.add_argument('--data_pretrain', type=str, required=True, default='ETTm1', help='dataset type')
parser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type')
parser.add_argument('--root_path', type=str, default='./dataset', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
parser.add_argument('--data_path_pretrain', type=str, default='ETTh1.csv', help='data file')
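# Two dataset pairs: --data_pretrain / --data_path_pretrain name the source dataset used for training
# and validation, while --data / --data_path name the target dataset reserved for zero-shot testing.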
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; '
'M:multivariate predict multivariate, S: univariate predict univariate, '
'MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='OT', help='target feature in S or MS task')
parser.add_argument('--loader', type=str, default='modal', help='dataset type')
parser.add_argument('--freq', type=str, default='h',
help='freq for time features encoding, '
'options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], '
'you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=96, help='input sequence length')
parser.add_argument('--label_len', type=int, default=48, help='start token length')
parser.add_argument('--pred_len', type=int, default=96, help='prediction sequence length')
parser.add_argument('--seasonal_patterns', type=str, default='Monthly', help='subset for M4')
# model define
parser.add_argument('--enc_in', type=int, default=7, help='encoder input size')
parser.add_argument('--dec_in', type=int, default=7, help='decoder input size')
parser.add_argument('--c_out', type=int, default=7, help='output size')
parser.add_argument('--d_model', type=int, default=16, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=8, help='num of heads')
parser.add_argument('--e_layers', type=int, default=2, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=32, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--dropout', type=float, default=0.1, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in encoder')
parser.add_argument('--patch_len', type=int, default=16, help='patch length')
parser.add_argument('--stride', type=int, default=8, help='stride')
parser.add_argument('--prompt_domain', type=int, default=0, help='whether to use a dataset-specific prompt description')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=1, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=10, help='train epochs')
parser.add_argument('--align_epochs', type=int, default=10, help='alignment epochs')
parser.add_argument('--batch_size', type=int, default=32, help='batch size of train input data')
parser.add_argument('--eval_batch_size', type=int, default=8, help='batch size of model evaluation')
parser.add_argument('--patience', type=int, default=5, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='MSE', help='loss function')
parser.add_argument('--lradj', type=str, default='type1', help='adjust learning rate')
parser.add_argument('--pct_start', type=float, default=0.2, help='pct_start')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
parser.add_argument('--llm_layers', type=int, default=6)
parser.add_argument('--percent', type=int, default=100)
args = parser.parse_args()
ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
deepspeed_plugin = DeepSpeedPlugin(hf_ds_config='./ds_config_zero2.json')
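# DeepSpeed settings (ZeRO stage 2, per the config file name) are read from ./ds_config_zero2.json at launch.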
accelerator = Accelerator(kwargs_handlers=[ddp_kwargs], deepspeed_plugin=deepspeed_plugin)
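# Zero-shot protocol: train/val loaders are built from the source dataset (args.data_pretrain /
# args.data_path_pretrain) with pretrain=True, while the test loader uses the held-out target
# dataset (args.data / args.data_path) with pretrain=False.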
for ii in range(args.itr):
    # setting record of experiments
    setting = '{}_{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_{}_{}'.format(
        args.task_name,
        args.model_id,
        args.model,
        args.data,
        args.features,
        args.seq_len,
        args.label_len,
        args.pred_len,
        args.d_model,
        args.n_heads,
        args.e_layers,
        args.d_layers,
        args.d_ff,
        args.factor,
        args.embed,
        args.des, ii)

    train_data, train_loader = data_provider(args, args.data_pretrain, args.data_path_pretrain, True, 'train')
    vali_data, vali_loader = data_provider(args, args.data_pretrain, args.data_path_pretrain, True, 'val')
    test_data, test_loader = data_provider(args, args.data, args.data_path, False, 'test')

    if args.model == 'Autoformer':
        model = Autoformer.Model(args).float()
    elif args.model == 'DLinear':
        model = DLinear.Model(args).float()
    else:
        model = TimeLLM.Model(args).float()

    path = os.path.join(args.checkpoints,
                        setting + '-' + args.model_comment)  # unique checkpoint saving path
    args.content = load_content(args)
    if not os.path.exists(path) and accelerator.is_local_main_process:
        os.makedirs(path)

    time_now = time.time()

    train_steps = len(train_loader)
    early_stopping = EarlyStopping(accelerator=accelerator, patience=args.patience)

    trained_parameters = []
    for p in model.parameters():
        if p.requires_grad is True:
            trained_parameters.append(p)

    model_optim = optim.Adam(trained_parameters, lr=args.learning_rate)

    if args.lradj == 'COS':
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(model_optim, T_max=20, eta_min=1e-8)
    else:
        scheduler = lr_scheduler.OneCycleLR(optimizer=model_optim,
                                            steps_per_epoch=train_steps,
                                            pct_start=args.pct_start,
                                            epochs=args.train_epochs,
                                            max_lr=args.learning_rate)

    criterion = nn.MSELoss()
    mae_metric = nn.L1Loss()

    train_loader, vali_loader, test_loader, model, model_optim, scheduler = accelerator.prepare(
        train_loader, vali_loader, test_loader, model, model_optim, scheduler)

    if args.use_amp:
        scaler = torch.cuda.amp.GradScaler()

    for epoch in range(args.train_epochs):
        iter_count = 0
        train_loss = []

        model.train()
        epoch_time = time.time()
        for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
            iter_count += 1
            model_optim.zero_grad()

            batch_x = batch_x.float().to(accelerator.device)
            batch_y = batch_y.float().to(accelerator.device)
            batch_x_mark = batch_x_mark.float().to(accelerator.device)
            batch_y_mark = batch_y_mark.float().to(accelerator.device)

            # decoder input
            dec_inp = torch.zeros_like(batch_y[:, -args.pred_len:, :]).float().to(
                accelerator.device)
            dec_inp = torch.cat([batch_y[:, :args.label_len, :], dec_inp], dim=1).float().to(
                accelerator.device)

            # encoder - decoder
            if args.use_amp:
                with torch.cuda.amp.autocast():
                    if args.output_attention:
                        outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                    else:
                        outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                    f_dim = -1 if args.features == 'MS' else 0
                    outputs = outputs[:, -args.pred_len:, f_dim:]
                    batch_y = batch_y[:, -args.pred_len:, f_dim:].to(accelerator.device)
                    loss = criterion(outputs, batch_y)
                    train_loss.append(loss.item())
            else:
                if args.output_attention:
                    outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                else:
                    outputs = model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                f_dim = -1 if args.features == 'MS' else 0
                outputs = outputs[:, -args.pred_len:, f_dim:]
                batch_y = batch_y[:, -args.pred_len:, f_dim:]
                loss = criterion(outputs, batch_y)
                train_loss.append(loss.item())

            if (i + 1) % 100 == 0:
                accelerator.print(
                    "\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
                speed = (time.time() - time_now) / iter_count
                left_time = speed * ((args.train_epochs - epoch) * train_steps - i)
                accelerator.print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
                iter_count = 0
                time_now = time.time()

            if args.use_amp:
                scaler.scale(loss).backward()
                scaler.step(model_optim)
                scaler.update()
            else:
                accelerator.backward(loss)
                model_optim.step()

            if args.lradj == 'TST':
                adjust_learning_rate(accelerator, model_optim, scheduler, epoch + 1, args, printout=False)
                scheduler.step()

        accelerator.print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - epoch_time))
        train_loss = np.average(train_loss)
        vali_loss, vali_mae_loss = vali(args, accelerator, model, vali_data, vali_loader, criterion, mae_metric)
        test_loss, test_mae_loss = vali(args, accelerator, model, test_data, test_loader, criterion, mae_metric)
        accelerator.print(
            "Epoch: {0} | Train Loss: {1:.7f} Vali Loss: {2:.7f} Test Loss: {3:.7f} MAE Loss: {4:.7f}".format(
                epoch + 1, train_loss, vali_loss, test_loss, test_mae_loss))

        early_stopping(vali_loss, model, path)
        if early_stopping.early_stop:
            accelerator.print("Early stopping")
            break

        if args.lradj != 'TST':
            if args.lradj == 'COS':
                scheduler.step()
                accelerator.print("lr = {:.10f}".format(model_optim.param_groups[0]['lr']))
            else:
                if epoch == 0:
                    args.learning_rate = model_optim.param_groups[0]['lr']
                    accelerator.print("lr = {:.10f}".format(model_optim.param_groups[0]['lr']))
                adjust_learning_rate(accelerator, model_optim, scheduler, epoch + 1, args, printout=True)
        else:
            accelerator.print('Updating learning rate to {}'.format(scheduler.get_last_lr()[0]))

accelerator.wait_for_everyone()
if accelerator.is_local_main_process:
    path = './checkpoints'  # unique checkpoint saving path
    del_files(path)  # delete checkpoint files
    accelerator.print('success delete checkpoints')

@@ -0,0 +1,124 @@
model_name=TimeLLM
learning_rate=0.01
llama_layers=32
master_port=00097
num_process=8
batch_size=24
d_model=32
d_ff=128
comment='TimeLLM-ETTh1_ETTh2'
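# Each run below pretrains on ETTh1 and evaluates zero-shot on ETTh2; the four runs differ in
# prediction horizon (96 / 192 / 336 / 720) and, in two cases, in learning rate or schedule.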
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_pretrain.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path_pretrain ETTh1.csv \
--data_path ETTh2.csv \
--model_id ETTh1_ETTh2_512_96 \
--model $model_name \
--data_pretrain ETTh1 \
--data ETTh2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 96 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size $batch_size \
--learning_rate $learning_rate \
--llm_layers $llama_layers \
--train_epochs 5 \
--model_comment $comment
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_pretrain.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path_pretrain ETTh1.csv \
--data_path ETTh2.csv \
--model_id ETTh1_ETTh2_512_192 \
--model $model_name \
--data_pretrain ETTh1 \
--data ETTh2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 192 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model 32 \
--d_ff 128 \
--batch_size $batch_size \
--learning_rate 0.02 \
--llm_layers $llama_layers \
--train_epochs 5 \
--model_comment $comment
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_pretrain.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path_pretrain ETTh1.csv \
--data_path ETTh2.csv \
--model_id ETTh1_ETTh2_512_336 \
--model $model_name \
--data_pretrain ETTh1 \
--data ETTh2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 336 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size $batch_size \
--lradj 'COS' \
--learning_rate 0.001 \
--llm_layers $llama_layers \
--train_epochs 5 \
--model_comment $comment
accelerate launch --multi_gpu --mixed_precision bf16 --num_processes $num_process --main_process_port $master_port run_pretrain.py \
--task_name long_term_forecast \
--is_training 1 \
--root_path ./dataset/ETT-small/ \
--data_path_pretrain ETTh1.csv \
--data_path ETTh2.csv \
--model_id ETTh1_ETTh2_512_720 \
--model $model_name \
--data_pretrain ETTh1 \
--data ETTh2 \
--features M \
--seq_len 512 \
--label_len 48 \
--pred_len 720 \
--factor 3 \
--enc_in 7 \
--dec_in 7 \
--c_out 7 \
--des 'Exp' \
--itr 1 \
--d_model $d_model \
--d_ff $d_ff \
--batch_size $batch_size \
--learning_rate $learning_rate \
--llm_layers $llama_layers \
--train_epochs 5 \
--model_comment $comment