In the function train_model() the for-loop "for asset_name in asset_name_all:" ends before the model-optimization step. As a result, the optimized LSTM network is effectively trained only on the data of the last asset in asset_name_all. That can't be intentional, can it?
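(A minimal sketch of what I mean, just illustrating Python's loop scoping with made-up asset names: a name bound in the loop body keeps only its last value once the loop has finished.)

for asset_name in ['EURUSD', 'BTCUSD', 'ETHUSD']:
    features = 'features for ' + asset_name
# the loop is over; 'features' now refers only to the last asset
print(features)  # -> 'features for ETHUSD'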
Best,
Eduard

For reference, here is the function from the template:
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import xarray as xr


def train_model(data):
    """
    Train the LSTM network.
    """
    # get_features, get_target_classes and get_model are defined elsewhere in the template
    asset_name_all = data.coords['asset'].values
    features_all = get_features(data)
    target_all = get_target_classes(data)
    model = get_model()
    for asset_name in asset_name_all:
        # drop missing values:
        target_cur = target_all.sel(asset=asset_name).dropna('time', 'any')
        features_cur = features_all.sel(asset=asset_name).dropna('time', 'any')
        # align features and targets:
        target_for_learn_df, feature_for_learn_df = xr.align(target_cur, features_cur, join='inner')
    # <-- the loop ends here, so everything below only sees the last asset's data
    criterion = nn.MSELoss()  # define the loss function
    optimiser = optim.LBFGS(model.parameters(), lr=0.08)  # we use an LBFGS solver as optimiser
    epochs = 1  # how many epochs
    for i in range(epochs):
        def closure():  # re-evaluates the model and returns the loss (forward pass)
            optimiser.zero_grad()
            # input tensor
            in_ = torch.zeros(1, len(feature_for_learn_df.values))
            in_[0, :] = torch.tensor(np.array(feature_for_learn_df.values))
            # output
            out = model(in_)
            # target tensor
            target = torch.zeros(1, len(target_for_learn_df.values))
            target[0, :] = torch.tensor(np.array(target_for_learn_df.values))
            # evaluate the loss
            loss = criterion(out, target)
            loss.backward()
            return loss
        optimiser.step(closure)  # updates the weights
    return model
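This is how I would have expected the function to look if every asset were meant to contribute to the fit, i.e. with the optimisation block indented into the asset loop. Just a sketch under that assumption, not a claim about what the template intends; the name train_model_fixed is mine:

def train_model_fixed(data):
    """
    Sketch: run the LBFGS optimisation once per asset, inside the loop.
    """
    asset_name_all = data.coords['asset'].values
    features_all = get_features(data)
    target_all = get_target_classes(data)
    model = get_model()
    criterion = nn.MSELoss()
    optimiser = optim.LBFGS(model.parameters(), lr=0.08)
    epochs = 1
    for asset_name in asset_name_all:
        target_cur = target_all.sel(asset=asset_name).dropna('time', 'any')
        features_cur = features_all.sel(asset=asset_name).dropna('time', 'any')
        target_for_learn_df, feature_for_learn_df = xr.align(target_cur, features_cur, join='inner')
        for i in range(epochs):
            # the closure is defined and consumed in the same iteration,
            # so it always sees the current asset's aligned data
            def closure():
                optimiser.zero_grad()
                in_ = torch.zeros(1, len(feature_for_learn_df.values))
                in_[0, :] = torch.tensor(np.array(feature_for_learn_df.values))
                out = model(in_)
                target = torch.zeros(1, len(target_for_learn_df.values))
                target[0, :] = torch.tensor(np.array(target_for_learn_df.values))
                loss = criterion(out, target)
                loss.backward()
                return loss
            optimiser.step(closure)  # weights are now updated for every asset
    return model

With a single shared model the ordering of assets still matters (later assets see a partially trained network), so training one model per asset would be another option; but at minimum the indentation above makes every asset contribute to the optimization.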