I'm building an LSTM and I want to predict s_max from a variable q_max, but the network just seems to pass the input data through and return it as the output. I've tried increasing the hidden size and the number of epochs, but that didn't help. I assume there's a problem with the way I've structured the data or the way the network is set up.
Here is a figure of the prediction my model makes:
At this point I literally just want it to fit the training data, so that I know it can learn a simple problem.
Here is my model:
import torch
import torch.nn as nn

class LSTM(nn.Module):
    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        super(LSTM, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # zero initial hidden and cell states
        h_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        c_0 = torch.zeros(self.num_layers, x.size(0), self.hidden_size)
        ula, (h_out, _) = self.lstm(x, (h_0, c_0))  # ula: [batch, seq_len, hidden], h_out: [layers, batch, hidden]
        h_out = h_out.view(-1, self.hidden_size)    # only the final hidden state
        out = self.fc(h_out)
        return out
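A quick shape check (dummy values, purely illustrative) confirms what this forward pass returns: since only the final hidden state h_out goes through the linear layer, the model emits a single value per window rather than one per time step.

model_check = LSTM(num_classes=1, input_size=1, hidden_size=1, num_layers=1)
dummy = torch.randn(8, 5, 1)          # [batch=8, seq_length=5, input_size=1]
print(model_check(dummy).shape)       # torch.Size([8, 1]) -> one prediction per window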
Data preprocessing:
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

def data_manipulator(data):
    df = pd.read_hdf(data)
    df = df.iloc[:, [1, 4]]                     # keep only the q_max and s_max columns
    scaled = MinMaxScaler().fit_transform(df)   # scale both columns to [0, 1]
    return pd.DataFrame(scaled)
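If I later want to compare predictions in the original units, I would also need to keep the fitted scaler; a variant that returns it could look like this (an untested sketch, the extra return value is my own addition):

def data_manipulator_keep_scaler(data):
    df = pd.read_hdf(data)
    df = df.iloc[:, [1, 4]]
    scaler = MinMaxScaler()
    scaled = scaler.fit_transform(df)
    return pd.DataFrame(scaled), scaler   # scaler kept for inverse_transform later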
import numpy as np
import torch

def sliding_windows(data, seq_length):
    y = np.ones([len(data) - seq_length - 1, 1])
    x = np.ones([len(data) - seq_length - 1, seq_length, 1])
    for i in range(len(data) - seq_length - 1):
        x[i] = np.array(data.iloc[i:i + seq_length, 0]).reshape(-1, 1)  # window of q_max values, ex. [1406, 5, 1]
        y[i] = data.iloc[i + seq_length, 1]                             # single next s_max value, ex. [1406, 1]
    return torch.tensor(x, dtype=torch.float), torch.tensor(y, dtype=torch.float)
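On a toy two-column DataFrame this produces one target per window, e.g. (made-up values, just to show the shapes):

toy = pd.DataFrame(np.column_stack([np.arange(10.0), np.arange(10.0) * 2]))
tx, ty = sliding_windows(toy, seq_length=3)
print(tx.shape, ty.shape)   # torch.Size([6, 3, 1]) torch.Size([6, 1])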
Setup, training, plot:
import glob
import matplotlib.pyplot as plt

data_files = glob.glob('data/*.hdf')
seq_length = 5

df = data_manipulator(data_files[0])
x, y = sliding_windows(df, seq_length)

lstm = LSTM(num_classes=1, input_size=1, hidden_size=1, num_layers=1)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(lstm.parameters(), lr=0.001)

num_epochs = 2000
for epoch in range(num_epochs):
    optimizer.zero_grad()
    outputs = lstm(x)
    loss = criterion(outputs, y)
    loss.backward()
    optimizer.step()
    if epoch % 100 == 0:
        print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))

lstm.eval()
output2 = lstm(x).detach().numpy()

plt.plot(df[0], label='q_max train')
plt.plot(df[1], label='s_max train')
plt.plot(output2, label='s_max output with q_max train as input')
plt.legend()
plt.show()
Training output:
Epoch: 0, loss: 0.52164
Epoch: 100, loss: 0.10143
Epoch: 200, loss: 0.04956
Epoch: 300, loss: 0.02736
Epoch: 400, loss: 0.02732
Epoch: 500, loss: 0.02727
Epoch: 600, loss: 0.02722
Epoch: 700, loss: 0.02714
Epoch: 800, loss: 0.02704
Epoch: 900, loss: 0.02689
Epoch: 1000, loss: 0.02663
After speaking to my project supervisor, there are a couple of things I hadn't thought about. First, the forward pass returns the final hidden state h_out instead of the per-time-step predictions that would come from ula. Secondly, my sliding_windows function sets the data up as "many to one", while what he was after is a "many to many" setup, which better suits this application, so I'll be reworking the input and output data architecture.