Decreased the learning rate for each subsequent epoch to reduce the chance of jumping out of a local minimum

This commit is contained in:
lluni 2022-05-25 20:56:55 +02:00
parent 1b296bb5d0
commit e19dec4af9

View file

@@ -76,7 +76,7 @@ public class Network {
// backward propagation
SimpleMatrix error = lossPrime.apply(y_train[j], output);
for (int k = layers.size() - 1; k >= 0; k--) {
-                    error = layers.get(k).backwardPropagation(error, learningRate);
+                    error = layers.get(k).backwardPropagation(error, learningRate / (i+1));
}
}
// calculate average error on all samples