Decreased learningRate for each subsequent epoch to reduce the chance of jumping out of a local minimum
This commit is contained in:
parent
1b296bb5d0
commit
e19dec4af9
1 changed file with 1 addition and 1 deletion
|
@@ -76,7 +76,7 @@ public class Network {
|
|||
// backward propagation
|
||||
SimpleMatrix error = lossPrime.apply(y_train[j], output);
|
||||
for (int k = layers.size() - 1; k >= 0; k--) {
|
||||
error = layers.get(k).backwardPropagation(error, learningRate);
|
||||
error = layers.get(k).backwardPropagation(error, learningRate / (i+1));
|
||||
}
|
||||
}
|
||||
// calculate average error on all samples
|
||||
|
|
Loading…
Reference in a new issue