- Choose whether to train on the CPU or the GPU
import torch
import torch.nn as nn

if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
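Equivalently, the device check is often collapsed into a single line:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")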
- Build the LSTM model
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, num_stacked_layers, output_size):
        super().__init__()  # run the parent class constructor
        self.hidden_size = hidden_size
        self.num_stacked_layers = num_stacked_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_stacked_layers, batch_first=True)  # the stacked LSTM layers
        self.fc = nn.Linear(hidden_size, output_size)  # fully connected output layer

    # forward pass
    def forward(self, x):
        batch_size = x.size(0)
        # initialize the hidden and cell states to zeros
        h0 = torch.zeros(self.num_stacked_layers, batch_size, self.hidden_size).to(device)
        c0 = torch.zeros(self.num_stacked_layers, batch_size, self.hidden_size).to(device)
        out, _ = self.lstm(x, (h0, c0))  # discard the returned (h_n, c_n) states
        out = self.fc(out[:, -1, :])  # keep only the output of the last time step
        return out
# instantiate the LSTM model
input_size = 1          # input dimension: the close price
hidden_size = 4         # hidden state dimension
num_stacked_layers = 1  # number of stacked LSTM layers
output_size = 1         # output dimension: the predicted close price
model = LSTM(input_size, hidden_size, num_stacked_layers, output_size)
model.to(device)  # move the model to the selected device
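As a quick smoke test of the shapes, a random batch can be pushed through the model; the window length of 7 here is illustrative, not taken from the pipeline above:

# hypothetical check: 32 windows, 7 time steps, 1 feature each
dummy = torch.randn(32, 7, 1).to(device)
print(model(dummy).shape)  # expected: torch.Size([32, 1])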
- Parameter settings
# learning rate
learning_rate = 0.001
# loss function: MSE suits this regression task (nn.CrossEntropyLoss and nn.NLLLoss are meant for classification instead)
loss_function = nn.MSELoss()
# optimizer; see https://blog.csdn.net/S20144144/article/details/103417502 for a comparison of optimization algorithms
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
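The training and validation loops below assume the train_loader and test_loader built in the earlier data-preparation step. If that step is not at hand, a minimal sketch, assuming X_train/y_train and X_test/y_test are already tensors of shape (N, seq_len, 1) and (N, 1), might look like this:

from torch.utils.data import TensorDataset, DataLoader

# minimal sketch; batch_size=16 is an illustrative choice
train_loader = DataLoader(TensorDataset(X_train, y_train), batch_size=16, shuffle=True)
test_loader = DataLoader(TensorDataset(X_test, y_test), batch_size=16, shuffle=False)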
- Training
def train_one_epoch(epoch):
    model.train(True)
    print(f'Epoch: {epoch + 1}')
    running_loss = 0.0
    for batch_index, batch in enumerate(train_loader):
        x_batch, y_batch = batch[0].to(device), batch[1].to(device)
        output = model(x_batch)  # forward pass
        loss = loss_function(output, y_batch)  # compute the loss
        running_loss += loss.item()
        optimizer.zero_grad()  # gradients accumulate by default, so clear them first
        loss.backward()  # backpropagation
        optimizer.step()  # update the parameters
        if batch_index % 100 == 99:  # print every 100 batches
            avg_loss_across_batches = running_loss / 100
            print('Batch {0}, Loss: {1:.3f}'.format(batch_index + 1, avg_loss_across_batches))
            running_loss = 0.0
    print()
- Validation
def validate_one_epoch():
    model.eval()  # switch to evaluation mode
    running_loss = 0.0
    # iterate over the test set and accumulate the prediction loss
    for batch_index, batch in enumerate(test_loader):
        x_batch, y_batch = batch[0].to(device), batch[1].to(device)
        with torch.no_grad():
            output = model(x_batch)
            loss = loss_function(output, y_batch)
            running_loss += loss.item()
    # average the loss over all batches
    avg_loss_across_batches = running_loss / len(test_loader)
    print('Val Loss: {0:.3f}'.format(avg_loss_across_batches))
- Set the number of training epochs
num_epochs = 10
for epoch in range(num_epochs):
    train_one_epoch(epoch)
    validate_one_epoch()

# predict on the training set for the plot below
with torch.no_grad():
    predicted = model(X_train.to(device)).to('cpu').numpy()
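The same pattern extends to the held-out split, assuming X_test exists from the data-preparation step:

with torch.no_grad():
    test_predicted = model(X_test.to(device)).to('cpu').numpy()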
- Visualization
import matplotlib.pyplot as plt

plt.plot(y_train, label='Actual close price')
plt.plot(predicted, label='Predicted close price')
plt.xlabel('Date')
plt.ylabel('Close price')
plt.legend()
plt.show()